/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */
/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>
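
/*
 * An extent tree maps logical file blocks to physical blocks.  Each
 * struct ext4_extent holds (ee_block, ee_len, ee_start): a run of
 * ee_len contiguous physical blocks starting at ee_start, covering
 * logical blocks starting at ee_block.  Every tree block, and the
 * root kept inline in the inode's i_data, begins with a struct
 * ext4_extent_header (eh_magic, eh_entries, eh_max, eh_depth);
 * interior blocks hold ext4_extent_idx entries pointing one level
 * down.
 */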
static int ext4_ext_check_header(const char *function, struct inode *inode,
                                struct ext4_extent_header *eh)
{
        const char *error_msg = NULL;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error(inode->i_sb, function,
                        "bad header in inode #%lu: %s - magic %x, "
                        "entries %u, max %u, depth %u",
                        inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        le16_to_cpu(eh->eh_depth));

        return -EIO;
}
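
/*
 * Extend the current transaction by 'needed' credits if the journal
 * allows it; otherwise restart the handle, so that subsequent
 * metadata updates are guaranteed enough room.
 */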
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
        int err;

        if (handle->h_buffer_credits > needed)
                return handle;
        if (!ext4_journal_extend(handle, needed))
                return handle;
        err = ext4_journal_restart(handle, needed);

        return handle;
}
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        int err;

        if (path->p_bh) {
                /* path points to block */
                err = ext4_journal_dirty_metadata(handle, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}
static int ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              unsigned long block)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext4_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return le32_to_cpu(ex->ee_start)
                                + (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a starting block from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
static int
ext4_ext_new_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err)
{
        int goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_block(handle, inode, goal, err);
        return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
        if (size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
        if (size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
        if (size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
        if (size > 4)
                size = 4;
#endif
        return size;
}
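
/*
 * Worked example, assuming a 4096-byte block and the 12-byte on-disk
 * header/entry sizes: a full index or leaf block holds
 * (4096 - 12) / 12 = 340 entries, while the 60-byte i_data root holds
 * (60 - 12) / 12 = 4 entries.
 */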
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%d", le32_to_cpu(path->p_idx->ei_block),
                                  le32_to_cpu(path->p_idx->ei_leaf));
                } else if (path->p_ext) {
                        ext_debug("  %d:%d:%d",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  le16_to_cpu(path->p_ext->ee_len),
                                  le32_to_cpu(path->p_ext->ee_start));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:%d:%d ", le32_to_cpu(ex->ee_block),
                          le16_to_cpu(ex->ee_len),
                          le32_to_cpu(ex->ee_start));
        }
        ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}
/*
 * binary search for the closest index by given block
 */
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;

        BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
        BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
        BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);

        ext_debug("binsearch for %d(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
                                m, m->ei_block, r, r->ei_block);
        }

        path->p_idx = l - 1;
        ext_debug("  -> %d->%d ", le32_to_cpu(path->p_idx->ei_block),
                  le32_to_cpu(path->p_idx->ei_leaf));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 &&
                            le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk("k=%d, ix=0x%p, first=0x%p\n", k,
                                        ix, EXT_FIRST_INDEX(eh));
                                printk("%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                        <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif
}
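
/*
 * Note on both searches: l starts at the second entry, so the loop
 * always terminates with l - 1 pointing at the rightmost entry whose
 * ei_block/ee_block does not exceed the target block (or at the first
 * entry if every entry starts beyond the target).
 */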
/*
 * binary search for the closest extent by given block
 */
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
        BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is still empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %d:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
                                m, m->ee_block, r, r->ee_block);
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%d:%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        le32_to_cpu(path->p_ext->ee_start),
                        le16_to_cpu(path->p_ext->ee_len));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                        <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif
}
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        BUG_ON(eh == NULL);
        if (ext4_ext_check_header(__FUNCTION__, inode, eh))
                return ERR_PTR(-EIO);

        i = depth = ext_depth(inode);

        /* account possible depth increase */
        if (!path) {
                path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
        path[0].p_hdr = eh;

        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = le32_to_cpu(path[ppos].p_idx->ei_leaf);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_bread(inode->i_sb, path[ppos].p_block);
                if (!bh)
                        goto err;

                eh = ext_block_hdr(bh);
                ppos++;
                BUG_ON(ppos > depth);
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                if (ext4_ext_check_header(__FUNCTION__, inode, eh))
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_hdr = eh;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        if (ext4_ext_check_header(__FUNCTION__, inode, eh))
                goto err;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(-EIO);
}
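
/*
 * Callers of ext4_ext_find_extent() either pass an existing path
 * array to be refilled or NULL to have one allocated; in both cases
 * the result must eventually be released with ext4_ext_drop_refs()
 * (and kfree() if it was allocated here).
 */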
/*
 * insert new index [logical;ptr] into the block at curp;
 * it checks where to insert: before curp or after curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *curp,
                                int logical, int ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        if ((err = ext4_ext_get_access(handle, inode, curp)))
                return err;

        BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
        len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
                        len = (len - 1) * sizeof(struct ext4_extent_idx);
                        len = len < 0 ? 0 : len;
                        ext_debug("insert new index %d after: %d. "
                                        "move %d from 0x%p to 0x%p\n",
                                        logical, ptr, len,
                                        (curp->p_idx + 1), (curp->p_idx + 2));
                        memmove(curp->p_idx + 2, curp->p_idx + 1, len);
                }
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                len = len * sizeof(struct ext4_extent_idx);
                len = len < 0 ? 0 : len;
                ext_debug("insert new index %d before: %d. "
                                "move %d from 0x%p to 0x%p\n",
                                logical, ptr, len,
                                curp->p_idx, (curp->p_idx + 1));
                memmove(curp->p_idx + 1, curp->p_idx, len);
                ix = curp->p_idx;
        }

        ix->ei_block = cpu_to_le32(logical);
        ix->ei_leaf = cpu_to_le32(ptr);
        curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

        BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
                        > le16_to_cpu(curp->p_hdr->eh_max));
        BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}
/*
 * routine inserts new subtree into the path, using free index entry
 * at depth 'at':
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct ext4_extent *ex;
        int i = at, k, m, a;
        unsigned long newblock, oldblock;
        __le32 border;
        unsigned long *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * if an error occurs, then we break processing
         * and turn the filesystem read-only. so, the index won't
         * be inserted and the tree will be in a consistent
         * state. next mount will repair buffers too
         */

        /*
         * get array to track all allocated blocks
         * we need this to handle errors and free blocks
         * upon them
         */
        ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;
        memset(ablocks, 0, sizeof(unsigned long) * depth);

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        BUG_ON(newblock == 0);
        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                goto cleanup;
        }
        lock_buffer(bh);

        if ((err = ext4_journal_get_create_access(handle, bh)))
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;
        ex = EXT_FIRST_EXTENT(neh);

        /* move remainder of path[depth] to the new leaf */
        BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
        /* start copy from next extent */
        /* TODO: we could do it by single memmove */
        m = 0;
        path[depth].p_ext++;
        while (path[depth].p_ext <=
                        EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%d:%d in new leaf %lu\n",
                                le32_to_cpu(path[depth].p_ext->ee_block),
                                le32_to_cpu(path[depth].p_ext->ee_start),
                                le16_to_cpu(path[depth].p_ext->ee_len),
                                newblock);
                /*memmove(ex++, path[depth].p_ext++,
                                sizeof(struct ext4_extent));
                neh->eh_entries++;*/
                path[depth].p_ext++;
                m++;
        }
        if (m) {
                memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
                neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
        }

        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        if ((err = ext4_journal_dirty_metadata(handle, bh)))
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                if ((err = ext4_ext_get_access(handle, inode, path + depth)))
                        goto cleanup;
                path[depth].p_hdr->eh_entries =
                     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
                if ((err = ext4_ext_dirty(handle, inode, path + depth)))
                        goto cleanup;
        }

        /* create intermediate indexes */
        k = depth - at - 1;
        BUG_ON(k < 0);
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -EIO;
                        goto cleanup;
                }
                lock_buffer(bh);

                if ((err = ext4_journal_get_create_access(handle, bh)))
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                fidx->ei_leaf = cpu_to_le32(oldblock);

                ext_debug("int.index at %d (block %lu): %lu -> %lu\n", i,
                                newblock, (unsigned long) le32_to_cpu(border),
                                oldblock);
                /* copy indexes */
                m = 0;
                path[i].p_idx++;

                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
                                EXT_LAST_INDEX(path[i].p_hdr));
                while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
                        ext_debug("%d: move %d:%d in new index %lu\n", i,
                                        le32_to_cpu(path[i].p_idx->ei_block),
                                        le32_to_cpu(path[i].p_idx->ei_leaf),
                                        newblock);
                        /*memmove(++fidx, path[i].p_idx++,
                                        sizeof(struct ext4_extent_idx));
                        neh->eh_entries++;
                        BUG_ON(neh->eh_entries > neh->eh_max);*/
                        path[i].p_idx++;
                        m++;
                }
                if (m) {
                        memmove(++fidx, path[i].p_idx - m,
                                sizeof(struct ext4_extent_idx) * m);
                        neh->eh_entries =
                                cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                if ((err = ext4_journal_dirty_metadata(handle, bh)))
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, ablocks[i], 1);
                }
        }
        kfree(ablocks);

        return err;
}
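
/*
 * For example: with depth 3 and a free index slot found at level 1
 * (at == 1), ext4_ext_split() allocates depth - at == 2 new blocks,
 * one intermediate index block and one leaf, then hooks the new
 * subtree in with a single ext4_ext_insert_index() at level 1.
 */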
/*
 * routine implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                        struct ext4_ext_path *path,
                                        struct ext4_extent *newext)
{
        struct ext4_ext_path *curp = path;
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct buffer_head *bh;
        unsigned long newblock;
        int err = 0;

        newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                ext4_std_error(inode->i_sb, err);
                return err;
        }
        lock_buffer(bh);

        if ((err = ext4_journal_get_create_access(handle, bh))) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        if ((err = ext4_journal_dirty_metadata(handle, bh)))
                goto out;

        /* create index in new top-level index: num,max,pointer */
        if ((err = ext4_ext_get_access(handle, inode, curp)))
                goto out;

        curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
        curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
        curp->p_hdr->eh_entries = cpu_to_le16(1);
        curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
        /* FIXME: it works, but actually path[0] can be index */
        curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
        curp->p_idx->ei_leaf = cpu_to_le32(newblock);

        neh = ext_inode_hdr(inode);
        fidx = EXT_FIRST_INDEX(neh);
        ext_debug("new root: num %d(%d), lblock %d, ptr %d\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(fidx->ei_block), le32_to_cpu(fidx->ei_leaf));

        neh->eh_depth = cpu_to_le16(path->p_depth + 1);
        err = ext4_ext_dirty(handle, inode, curp);
out:
        brelse(bh);

        return err;
}
/*
 * routine finds an empty index and adds a new leaf. if no free index
 * is found, then it requests in-depth growing
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                        struct ext4_ext_path *path,
                                        struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up the tree looking for a free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use the already allocated block for the index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found an index with a free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, path, newext, i);

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                            le32_to_cpu(newext->ee_block),
                                            path);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, path, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                            le32_to_cpu(newext->ee_block),
                                            path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only the first grow (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}
/*
 * returns the allocated block in the subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers the block number from an index entry as an
 * allocated block. thus, index entries have to be consistent
 * with leaves
 */
static unsigned long
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        if (depth == 0 && path->p_ext == NULL)
                return EXT_MAX_BLOCK;

        while (depth >= 0) {
                if (depth == path->p_depth) {
                        /* leaf */
                        if (path[depth].p_ext !=
                                        EXT_LAST_EXTENT(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_ext[1].ee_block);
                } else {
                        /* index */
                        if (path[depth].p_idx !=
                                        EXT_LAST_INDEX(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_idx[1].ei_block);
                }
                depth--;
        }

        return EXT_MAX_BLOCK;
}
/*
 * returns the first allocated block from the next leaf or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
                                        struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        /* zero-depth tree has no leaf blocks at all */
        if (depth == 0)
                return EXT_MAX_BLOCK;

        /* go to index block */
        depth--;

        while (depth >= 0) {
                if (path[depth].p_idx !=
                                EXT_LAST_INDEX(path[depth].p_hdr))
                        return le32_to_cpu(path[depth].p_idx[1].ei_block);
                depth--;
        }

        return EXT_MAX_BLOCK;
}
/*
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        int depth = ext_depth(inode);
        struct ext4_extent *ex;
        __le32 border;
        int k, err = 0;

        eh = path[depth].p_hdr;
        ex = path[depth].p_ext;
        BUG_ON(ex == NULL);
        BUG_ON(eh == NULL);

        if (depth == 0) {
                /* there is no tree at all */
                return 0;
        }

        if (ex != EXT_FIRST_EXTENT(eh)) {
                /* we correct the tree only if the first leaf extent was modified */
                return 0;
        }

        /*
         * TODO: we need correction if border is smaller than the current one
         */
        k = depth - 1;
        border = path[depth].p_ext->ee_block;
        if ((err = ext4_ext_get_access(handle, inode, path + k)))
                return err;
        path[k].p_idx->ei_block = border;
        if ((err = ext4_ext_dirty(handle, inode, path + k)))
                return err;

        while (k--) {
                /* change all left-side indexes */
                if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
                        break;
                if ((err = ext4_ext_get_access(handle, inode, path + k)))
                        break;
                path[k].p_idx->ei_block = border;
                if ((err = ext4_ext_dirty(handle, inode, path + k)))
                        break;
        }

        return err;
}
static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
                                struct ext4_extent *ex2)
{
        /* FIXME: 48bit support */
        if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len)
                        != le32_to_cpu(ex2->ee_block))
                return 0;

#ifdef AGRESSIVE_TEST
        if (le16_to_cpu(ex1->ee_len) >= 4)
                return 0;
#endif

        if (le32_to_cpu(ex1->ee_start) + le16_to_cpu(ex1->ee_len)
                        == le32_to_cpu(ex2->ee_start))
                return 1;
        return 0;
}
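
/*
 * e.g. extents [100,+8 -> 5000] and [108,+4 -> 5008] are mergeable:
 * both logically and physically contiguous; [100,+8 -> 5000] and
 * [108,+4 -> 6000] are not, since the physical runs don't meet.
 */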
/*
 * this routine tries to merge the requested extent into an existing
 * extent or inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext)
{
        struct ext4_extent_header * eh;
        struct ext4_extent *ex, *fex;
        struct ext4_extent *nearex; /* nearest extent */
        struct ext4_ext_path *npath = NULL;
        int depth, len, err, next;

        BUG_ON(newext->ee_len == 0);
        depth = ext_depth(inode);
        ex = path[depth].p_ext;
        BUG_ON(path[depth].p_hdr == NULL);

        /* try to insert block into found extent and return */
        if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
                ext_debug("append %d block to %d:%d (from %d)\n",
                                le16_to_cpu(newext->ee_len),
                                le32_to_cpu(ex->ee_block),
                                le16_to_cpu(ex->ee_len),
                                le32_to_cpu(ex->ee_start));
                if ((err = ext4_ext_get_access(handle, inode, path + depth)))
                        return err;
                ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
                                        + le16_to_cpu(newext->ee_len));
                eh = path[depth].p_hdr;
                nearex = ex;
                goto merge;
        }

repeat:
        depth = ext_depth(inode);
        eh = path[depth].p_hdr;
        if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
                goto has_space;

        /* probably next leaf has space for us? */
        fex = EXT_LAST_EXTENT(eh);
        next = ext4_ext_next_leaf_block(inode, path);
        if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
            && next != EXT_MAX_BLOCK) {
                ext_debug("next leaf block - %d\n", next);
                BUG_ON(npath != NULL);
                npath = ext4_ext_find_extent(inode, next, NULL);
                if (IS_ERR(npath))
                        return PTR_ERR(npath);
                BUG_ON(npath->p_depth != path->p_depth);
                eh = npath[depth].p_hdr;
                if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
                        ext_debug("next leaf isn't full(%d)\n",
                                  le16_to_cpu(eh->eh_entries));
                        path = npath;
                        goto repeat;
                }
                ext_debug("next leaf has no free space(%d,%d)\n",
                          le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
        }

        /*
         * there is no free space in the found leaf,
         * so we have to add a new leaf to the tree
         */
        err = ext4_ext_create_new_leaf(handle, inode, path, newext);
        if (err)
                goto cleanup;
        depth = ext_depth(inode);
        eh = path[depth].p_hdr;

has_space:
        nearex = path[depth].p_ext;

        if ((err = ext4_ext_get_access(handle, inode, path + depth)))
                goto cleanup;

        if (!nearex) {
                /* there is no extent in this leaf, create the first one */
                ext_debug("first extent in the leaf: %d:%d:%d\n",
                                le32_to_cpu(newext->ee_block),
                                le32_to_cpu(newext->ee_start),
                                le16_to_cpu(newext->ee_len));
                path[depth].p_ext = EXT_FIRST_EXTENT(eh);
        } else if (le32_to_cpu(newext->ee_block)
                           > le32_to_cpu(nearex->ee_block)) {
                /* BUG_ON(newext->ee_block == nearex->ee_block); */
                if (nearex != EXT_LAST_EXTENT(eh)) {
                        len = EXT_MAX_EXTENT(eh) - nearex;
                        len = (len - 1) * sizeof(struct ext4_extent);
                        len = len < 0 ? 0 : len;
                        ext_debug("insert %d:%d:%d after: nearest 0x%p, "
                                        "move %d from 0x%p to 0x%p\n",
                                        le32_to_cpu(newext->ee_block),
                                        le32_to_cpu(newext->ee_start),
                                        le16_to_cpu(newext->ee_len),
                                        nearex, len, nearex + 1, nearex + 2);
                        memmove(nearex + 2, nearex + 1, len);
                }
                path[depth].p_ext = nearex + 1;
        } else {
                BUG_ON(newext->ee_block == nearex->ee_block);
                len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
                len = len < 0 ? 0 : len;
                ext_debug("insert %d:%d:%d before: nearest 0x%p, "
                                "move %d from 0x%p to 0x%p\n",
                                le32_to_cpu(newext->ee_block),
                                le32_to_cpu(newext->ee_start),
                                le16_to_cpu(newext->ee_len),
                                nearex, len, nearex + 1, nearex + 2);
                memmove(nearex + 1, nearex, len);
                path[depth].p_ext = nearex;
        }

        eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
        nearex = path[depth].p_ext;
        nearex->ee_block = newext->ee_block;
        nearex->ee_start = newext->ee_start;
        nearex->ee_len = newext->ee_len;
        /* FIXME: support for large fs */
        nearex->ee_start_hi = 0;

merge:
        /* try to merge extents to the right */
        while (nearex < EXT_LAST_EXTENT(eh)) {
                if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
                        break;
                /* merge with next extent! */
                nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
                                        + le16_to_cpu(nearex[1].ee_len));
                if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
                        len = (EXT_LAST_EXTENT(eh) - nearex - 1)
                                        * sizeof(struct ext4_extent);
                        memmove(nearex + 1, nearex + 2, len);
                }
                eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
                BUG_ON(eh->eh_entries == 0);
        }

        /* try to merge extents to the left */

        /* time to correct all indexes above */
        err = ext4_ext_correct_indexes(handle, inode, path);
        if (err)
                goto cleanup;

        err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
        if (npath) {
                ext4_ext_drop_refs(npath);
                kfree(npath);
        }
        ext4_ext_tree_changed(inode);
        ext4_ext_invalidate_cache(inode);
        return err;
}
int ext4_ext_walk_space(struct inode *inode, unsigned long block,
                        unsigned long num, ext_prepare_callback func,
                        void *cbdata)
{
        struct ext4_ext_path *path = NULL;
        struct ext4_ext_cache cbex;
        struct ext4_extent *ex;
        unsigned long next, start = 0, end = 0;
        unsigned long last = block + num;
        int depth, exists, err = 0;

        BUG_ON(func == NULL);
        BUG_ON(inode == NULL);

        while (block < last && block != EXT_MAX_BLOCK) {
                num = last - block;
                /* find extent for this block */
                path = ext4_ext_find_extent(inode, block, path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        path = NULL;
                        break;
                }

                depth = ext_depth(inode);
                BUG_ON(path[depth].p_hdr == NULL);
                ex = path[depth].p_ext;
                next = ext4_ext_next_allocated_block(path);

                exists = 0;
                if (!ex) {
                        /* there is no extent yet, so try to allocate
                         * all requested space */
                        start = block;
                        end = block + num;
                } else if (le32_to_cpu(ex->ee_block) > block) {
                        /* need to allocate space before found extent */
                        start = block;
                        end = le32_to_cpu(ex->ee_block);
                        if (block + num < end)
                                end = block + num;
                } else if (block >=
                             le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
                        /* need to allocate space after found extent */
                        start = block;
                        end = block + num;
                        if (end >= next)
                                end = next;
                } else if (block >= le32_to_cpu(ex->ee_block)) {
                        /*
                         * some part of requested space is covered
                         * by found extent
                         */
                        start = block;
                        end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
                        if (block + num < end)
                                end = block + num;
                        exists = 1;
                } else {
                        BUG();
                }
                BUG_ON(end <= start);

                if (!exists) {
                        cbex.ec_block = start;
                        cbex.ec_len = end - start;
                        cbex.ec_start = 0;
                        cbex.ec_type = EXT4_EXT_CACHE_GAP;
                } else {
                        cbex.ec_block = le32_to_cpu(ex->ee_block);
                        cbex.ec_len = le16_to_cpu(ex->ee_len);
                        cbex.ec_start = le32_to_cpu(ex->ee_start);
                        cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
                }

                BUG_ON(cbex.ec_len == 0);
                err = func(inode, path, &cbex, cbdata);
                ext4_ext_drop_refs(path);

                if (err < 0)
                        break;
                if (err == EXT_REPEAT)
                        continue;
                else if (err == EXT_BREAK) {
                        err = 0;
                        break;
                }

                if (ext_depth(inode) != depth) {
                        /* depth was changed. we have to realloc path */
                        kfree(path);
                        path = NULL;
                }

                block = cbex.ec_block + cbex.ec_len;
        }

        if (path) {
                ext4_ext_drop_refs(path);
                kfree(path);
        }

        return err;
}
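
/*
 * The callback passed to ext4_ext_walk_space() sees every region in
 * [block, block+num) exactly once, as either a real extent
 * (EXT4_EXT_CACHE_EXTENT) or a hole (EXT4_EXT_CACHE_GAP); a negative
 * return aborts the walk, EXT_BREAK stops it cleanly, EXT_REPEAT
 * rescans the same region, and anything else continues.
 */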
static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
                        __u32 len, __u32 start, int type)
{
        struct ext4_ext_cache *cex;

        cex = &EXT4_I(inode)->i_cached_extent;
        cex->ec_type = type;
        cex->ec_block = block;
        cex->ec_len = len;
        cex->ec_start = start;
}
/*
 * this routine calculates the boundaries of the gap that the requested
 * block fits into and caches this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                                unsigned long block)
{
        int depth = ext_depth(inode);
        unsigned long lblock, len;
        struct ext4_extent *ex;

        ex = path[depth].p_ext;
        if (ex == NULL) {
                /* there is no extent yet, so gap is [0;-] */
                lblock = 0;
                len = EXT_MAX_BLOCK;
                ext_debug("cache gap(whole file):");
        } else if (block < le32_to_cpu(ex->ee_block)) {
                lblock = block;
                len = le32_to_cpu(ex->ee_block) - block;
                ext_debug("cache gap(before): %lu [%lu:%lu]",
                                (unsigned long) block,
                                (unsigned long) le32_to_cpu(ex->ee_block),
                                (unsigned long) le16_to_cpu(ex->ee_len));
        } else if (block >= le32_to_cpu(ex->ee_block)
                            + le16_to_cpu(ex->ee_len)) {
                lblock = le32_to_cpu(ex->ee_block)
                         + le16_to_cpu(ex->ee_len);
                len = ext4_ext_next_allocated_block(path);
                ext_debug("cache gap(after): [%lu:%lu] %lu",
                                (unsigned long) le32_to_cpu(ex->ee_block),
                                (unsigned long) le16_to_cpu(ex->ee_len),
                                (unsigned long) block);
                BUG_ON(len == lblock);
                len = len - lblock;
        } else {
                lblock = len = 0;
                BUG();
        }

        ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
        ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
static int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
                        struct ext4_extent *ex)
{
        struct ext4_ext_cache *cex;

        cex = &EXT4_I(inode)->i_cached_extent;

        /* does the cache hold valid data? */
        if (cex->ec_type == EXT4_EXT_CACHE_NO)
                return EXT4_EXT_CACHE_NO;

        BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
                        cex->ec_type != EXT4_EXT_CACHE_EXTENT);
        if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
                ex->ee_block = cpu_to_le32(cex->ec_block);
                ex->ee_start = cpu_to_le32(cex->ec_start);
                ex->ee_len = cpu_to_le16(cex->ec_len);
                ext_debug("%lu cached by %lu:%lu:%lu\n",
                                (unsigned long) block,
                                (unsigned long) cex->ec_block,
                                (unsigned long) cex->ec_len,
                                (unsigned long) cex->ec_start);
                return cex->ec_type;
        }

        /* not in cache */
        return EXT4_EXT_CACHE_NO;
}
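
/*
 * Note: i_cached_extent is a single shared slot with no locking of
 * its own; callers are expected to serialize access via
 * truncate_mutex, as ext4_ext_get_blocks() does.
 */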
/*
 * routine removes an index from the index block.
 * it's used in the truncate case only, thus all requests are for
 * the last index in the block only
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path)
{
        struct buffer_head *bh;
        int err;
        unsigned long leaf;

        /* free index block */
        path--;
        leaf = le32_to_cpu(path->p_idx->ei_leaf);
        BUG_ON(path->p_hdr->eh_entries == 0);
        if ((err = ext4_ext_get_access(handle, inode, path)))
                return err;
        path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
        if ((err = ext4_ext_dirty(handle, inode, path)))
                return err;
        ext_debug("index is empty, remove it, free block %lu\n", leaf);
        bh = sb_find_get_block(inode->i_sb, leaf);
        ext4_forget(handle, 1, inode, bh, leaf);
        ext4_free_blocks(handle, inode, leaf, 1);
        return err;
}
/*
 * This routine returns the max. credits the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage().
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
                                                struct ext4_ext_path *path)
{
        int depth, needed;

        if (path) {
                /* probably there is space in the leaf? */
                depth = ext_depth(inode);
                if (le16_to_cpu(path[depth].p_hdr->eh_entries)
                                < le16_to_cpu(path[depth].p_hdr->eh_max))
                        return 1;
        }

        /*
         * given a 32bit logical block (4294967296 blocks), the max. tree
         * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
         * let's also add one more level for imbalance.
         */
        depth = 5;

        /* allocation of new data block(s) */
        needed = 2;

        /*
         * tree can be full, so it'd need to grow in depth:
         * allocation + old root + new root
         */
        needed += 2 + 1 + 1;

        /*
         * Index split can happen, we'd need:
         *    allocate intermediate indexes (bitmap + group)
         *  + change two blocks at each level, but root (already included)
         */
        needed += (depth * 2) + (depth * 2);

        /* any allocation modifies superblock */
        needed += 1;

        return needed;
}
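
/*
 * Worst case with no path supplied, then: depth = 5 gives
 * 2 (data) + 4 (grow in depth) + 20 (index splits) + 1 (superblock)
 * = 27 credits for a single extent insert.
 */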
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
                                struct ext4_extent *ex,
                                unsigned long from, unsigned long to)
{
        struct buffer_head *bh;
        int i;

#ifdef EXTENTS_STATS
        {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
                unsigned short ee_len = le16_to_cpu(ex->ee_len);
                spin_lock(&sbi->s_ext_stats_lock);
                sbi->s_ext_blocks += ee_len;
                sbi->s_ext_extents++;
                if (ee_len < sbi->s_ext_min)
                        sbi->s_ext_min = ee_len;
                if (ee_len > sbi->s_ext_max)
                        sbi->s_ext_max = ee_len;
                if (ext_depth(inode) > sbi->s_depth_max)
                        sbi->s_depth_max = ext_depth(inode);
                spin_unlock(&sbi->s_ext_stats_lock);
        }
#endif
        if (from >= le32_to_cpu(ex->ee_block)
            && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
                /* tail removal */
                unsigned long num, start;
                num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
                start = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - num;
                ext_debug("free last %lu blocks starting %lu\n", num, start);
                for (i = 0; i < num; i++) {
                        bh = sb_find_get_block(inode->i_sb, start + i);
                        ext4_forget(handle, 0, inode, bh, start + i);
                }
                ext4_free_blocks(handle, inode, start, num);
        } else if (from == le32_to_cpu(ex->ee_block)
                   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
                printk("strange request: removal %lu-%lu from %u:%u\n",
                       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
        } else {
                printk("strange request: removal(2) %lu-%lu from %u:%u\n",
                       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
        }
        return 0;
}
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                struct ext4_ext_path *path, unsigned long start)
{
        int err = 0, correct_index = 0;
        int depth = ext_depth(inode), credits;
        struct ext4_extent_header *eh;
        unsigned a, b, block, num;
        unsigned long ex_ee_block;
        unsigned short ex_ee_len;
        struct ext4_extent *ex;

        ext_debug("truncate since %lu in leaf\n", start);
        if (!path[depth].p_hdr)
                path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
        eh = path[depth].p_hdr;
        BUG_ON(eh == NULL);
        BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
        BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);

        /* find where to start removing */
        ex = EXT_LAST_EXTENT(eh);

        ex_ee_block = le32_to_cpu(ex->ee_block);
        ex_ee_len = le16_to_cpu(ex->ee_len);

        while (ex >= EXT_FIRST_EXTENT(eh) &&
                        ex_ee_block + ex_ee_len > start) {
                ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
                path[depth].p_ext = ex;

                a = ex_ee_block > start ? ex_ee_block : start;
                b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
                        ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

                ext_debug("  border %u:%u\n", a, b);

                if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
                        block = 0;
                        num = 0;
                        BUG();
                } else if (a != ex_ee_block) {
                        /* remove tail of the extent */
                        block = ex_ee_block;
                        num = a - block;
                } else if (b != ex_ee_block + ex_ee_len - 1) {
                        /* remove head of the extent */
                        block = a;
                        num = b - a;
                        /* there is no "make a hole" API yet */
                        BUG();
                } else {
                        /* remove whole extent: excellent! */
                        block = ex_ee_block;
                        num = 0;
                        BUG_ON(a != ex_ee_block);
                        BUG_ON(b != ex_ee_block + ex_ee_len - 1);
                }

                /* at present, extent can't cross block group */
                /* leaf + bitmap + group desc + sb + inode */
                credits = 5;
                if (ex == EXT_FIRST_EXTENT(eh)) {
                        correct_index = 1;
                        credits += (ext_depth(inode)) + 1;
                }
#ifdef CONFIG_QUOTA
                credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

                handle = ext4_ext_journal_restart(handle, credits);
                if (IS_ERR(handle)) {
                        err = PTR_ERR(handle);
                        goto out;
                }

                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto out;

                err = ext4_remove_blocks(handle, inode, ex, a, b);
                if (err)
                        goto out;

                if (num == 0) {
                        /* this extent is removed entirely, mark the slot unused */
                        ex->ee_start = 0;
                        eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
                }

                ex->ee_block = cpu_to_le32(block);
                ex->ee_len = cpu_to_le16(num);

                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto out;

                ext_debug("new extent: %u:%u:%u\n", block, num,
                                le32_to_cpu(ex->ee_start));
                ex--;
                ex_ee_block = le32_to_cpu(ex->ee_block);
                ex_ee_len = le16_to_cpu(ex->ee_len);
        }

        if (correct_index && eh->eh_entries)
                err = ext4_ext_correct_indexes(handle, inode, path);

        /* if this leaf is free, then we should
         * remove it from the index block above */
        if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
                err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
        return err;
}
/*
 * returns 1 if the current index has to be freed (even partial)
 */
static int inline
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
        BUG_ON(path->p_idx == NULL);

        if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
                return 0;

        /*
         * if a truncate on a deeper level happened, it wasn't partial,
         * so we have to consider the current index for truncation
         */
        if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
                return 0;
        return 1;
}
int ext4_ext_remove_space(struct inode *inode, unsigned long start)
{
        struct super_block *sb = inode->i_sb;
        int depth = ext_depth(inode);
        struct ext4_ext_path *path;
        handle_t *handle;
        int i = 0, err = 0;

        ext_debug("truncate since %lu\n", start);

        /* probably the first extent we're gonna free will be the last in the block */
        handle = ext4_journal_start(inode, depth + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        ext4_ext_invalidate_cache(inode);

        /*
         * we start scanning from the right side, freeing all the blocks
         * after i_size and walking into the deep
         */
        path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
        if (path == NULL) {
                ext4_journal_stop(handle);
                return -ENOMEM;
        }
        memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
        path[0].p_hdr = ext_inode_hdr(inode);
        if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
                err = -EIO;
                goto out;
        }
        path[0].p_depth = depth;

        while (i >= 0 && err == 0) {
                if (i == depth) {
                        /* this is a leaf block */
                        err = ext4_ext_rm_leaf(handle, inode, path, start);
                        /* root level has p_bh == NULL, brelse() eats this */
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        i--;
                        continue;
                }

                /* this is an index block */
                if (!path[i].p_hdr) {
                        ext_debug("initialize header\n");
                        path[i].p_hdr = ext_block_hdr(path[i].p_bh);
                        if (ext4_ext_check_header(__FUNCTION__, inode,
                                                  path[i].p_hdr)) {
                                err = -EIO;
                                goto out;
                        }
                }

                BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
                           > le16_to_cpu(path[i].p_hdr->eh_max));
                BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);

                if (!path[i].p_idx) {
                        /* this level hasn't been touched yet */
                        path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
                        ext_debug("init index ptr: hdr 0x%p, num %d\n",
                                  path[i].p_hdr,
                                  le16_to_cpu(path[i].p_hdr->eh_entries));
                } else {
                        /* we were here before, look at the next index */
                        path[i].p_idx--;
                }

                ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
                                i, EXT_FIRST_INDEX(path[i].p_hdr),
                                path[i].p_idx);
                if (ext4_ext_more_to_rm(path + i)) {
                        /* go to the next level */
                        ext_debug("move to level %d (block %d)\n",
                                  i + 1, le32_to_cpu(path[i].p_idx->ei_leaf));
                        memset(path + i + 1, 0, sizeof(*path));
                        path[i+1].p_bh =
                                sb_bread(sb, le32_to_cpu(path[i].p_idx->ei_leaf));
                        if (!path[i+1].p_bh) {
                                /* should we reset i_size? */
                                err = -EIO;
                                break;
                        }

                        /* save the actual number of indexes so we can tell
                         * whether it changes at the next iteration */
                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
                        i++;
                } else {
                        /* we've finished processing this index, go up */
                        if (path[i].p_hdr->eh_entries == 0 && i > 0) {
                                /* index is empty, remove it;
                                 * handle must already be prepared by
                                 * ext4_ext_rm_leaf() */
                                err = ext4_ext_rm_idx(handle, inode, path + i);
                        }
                        /* root level has p_bh == NULL, brelse() eats this */
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        i--;
                        ext_debug("return to level %d\n", i);
                }
        }

        /* TODO: flexible tree reduction should be here */
        if (path->p_hdr->eh_entries == 0) {
                /*
                 * truncate to zero freed the whole tree,
                 * so we need to correct eh_depth
                 */
                err = ext4_ext_get_access(handle, inode, path);
                if (err == 0) {
                        ext_inode_hdr(inode)->eh_depth = 0;
                        ext_inode_hdr(inode)->eh_max =
                                cpu_to_le16(ext4_ext_space_root(inode));
                        err = ext4_ext_dirty(handle, inode, path);
                }
        }
out:
        ext4_ext_tree_changed(inode);
        ext4_ext_drop_refs(path);
        kfree(path);
        ext4_journal_stop(handle);

        return err;
}
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
        /*
         * possible initialization would be here
         */

        if (test_opt(sb, EXTENTS)) {
                printk("EXT4-fs: file extents enabled");
#ifdef AGRESSIVE_TEST
                printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
                printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
                printk(", stats");
#endif
                printk("\n");
#ifdef EXTENTS_STATS
                spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
                EXT4_SB(sb)->s_ext_min = 1 << 30;
                EXT4_SB(sb)->s_ext_max = 0;
#endif
        }
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
        if (!test_opt(sb, EXTENTS))
                return;

#ifdef EXTENTS_STATS
        if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
                struct ext4_sb_info *sbi = EXT4_SB(sb);
                printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
                        sbi->s_ext_blocks, sbi->s_ext_extents,
                        sbi->s_ext_blocks / sbi->s_ext_extents);
                printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
                        sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
        }
#endif
}
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, sector_t iblock,
                        unsigned long max_blocks, struct buffer_head *bh_result,
                        int create, int extend_disksize)
{
        struct ext4_ext_path *path = NULL;
        struct ext4_extent newex, *ex;
        int goal, newblock, err = 0, depth;
        unsigned long allocated = 0;

        __clear_bit(BH_New, &bh_result->b_state);
        ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
                        max_blocks, (unsigned) inode->i_ino);
        mutex_lock(&EXT4_I(inode)->truncate_mutex);

        /* check in cache */
        if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
                if (goal == EXT4_EXT_CACHE_GAP) {
                        if (!create) {
                                /* block isn't allocated yet and
                                 * user doesn't want to allocate it */
                                goto out2;
                        }
                        /* we should allocate the requested block */
                } else if (goal == EXT4_EXT_CACHE_EXTENT) {
                        /* block is already allocated */
                        newblock = iblock
                                   - le32_to_cpu(newex.ee_block)
                                   + le32_to_cpu(newex.ee_start);
                        /* number of remaining blocks in the extent */
                        allocated = le16_to_cpu(newex.ee_len) -
                                        (iblock - le32_to_cpu(newex.ee_block));
                        goto out;
                } else {
                        BUG();
                }
        }

        /* find extent for this block */
        path = ext4_ext_find_extent(inode, iblock, NULL);
        if (IS_ERR(path)) {
                err = PTR_ERR(path);
                path = NULL;
                goto out2;
        }

        depth = ext_depth(inode);

        /*
         * a consistent leaf must not be empty;
         * this situation is possible, though, _during_ tree modification;
         * this is why the assert can't be put in ext4_ext_find_extent()
         */
        BUG_ON(path[depth].p_ext == NULL && depth != 0);

        if ((ex = path[depth].p_ext)) {
                unsigned long ee_block = le32_to_cpu(ex->ee_block);
                unsigned long ee_start = le32_to_cpu(ex->ee_start);
                unsigned short ee_len  = le16_to_cpu(ex->ee_len);
                /* if the found extent covers the block, simply return it */
                if (iblock >= ee_block && iblock < ee_block + ee_len) {
                        newblock = iblock - ee_block + ee_start;
                        /* number of remaining blocks in the extent */
                        allocated = ee_len - (iblock - ee_block);
                        ext_debug("%d fit into %lu:%d -> %d\n", (int) iblock,
                                        ee_block, ee_len, newblock);
                        ext4_ext_put_in_cache(inode, ee_block, ee_len,
                                                ee_start, EXT4_EXT_CACHE_EXTENT);
                        goto out;
                }
        }

        /*
         * the requested block isn't allocated yet;
         * we can't try to create a block if the create flag is zero
         */
        if (!create) {
                /* put the just found gap into cache to speed up
                 * subsequent requests */
                ext4_ext_put_gap_in_cache(inode, path, iblock);
                goto out2;
        }
        /*
         * Okay, we need to do block allocation.  Lazily initialize the block
         * allocation info here if necessary
         */
        if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
                ext4_init_block_alloc_info(inode);

        /* allocate new block */
        goal = ext4_ext_find_goal(inode, path, iblock);
        allocated = max_blocks;
        newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
        if (!newblock)
                goto out2;
        ext_debug("allocate new block: goal %d, found %d/%lu\n",
                        goal, newblock, allocated);

        /* try to insert new extent into found leaf and return */
        newex.ee_block = cpu_to_le32(iblock);
        newex.ee_start = cpu_to_le32(newblock);
        newex.ee_len = cpu_to_le16(allocated);
        err = ext4_ext_insert_extent(handle, inode, path, &newex);
        if (err)
                goto out2;

        if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
                EXT4_I(inode)->i_disksize = inode->i_size;

        /* previous routine could use the block we allocated */
        newblock = le32_to_cpu(newex.ee_start);
        __set_bit(BH_New, &bh_result->b_state);

        ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
                                EXT4_EXT_CACHE_EXTENT);
out:
        if (allocated > max_blocks)
                allocated = max_blocks;
        ext4_ext_show_leaf(inode, path);
        __set_bit(BH_Mapped, &bh_result->b_state);
        bh_result->b_bdev = inode->i_sb->s_bdev;
        bh_result->b_blocknr = newblock;
out2:
        if (path) {
                ext4_ext_drop_refs(path);
                kfree(path);
        }
        mutex_unlock(&EXT4_I(inode)->truncate_mutex);

        return err ? err : allocated;
}
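
/*
 * On success ext4_ext_get_blocks() returns the number of blocks
 * mapped (possibly fewer than max_blocks, never more), with BH_Mapped
 * set and BH_New set only when the blocks were freshly allocated; on
 * failure it returns a negative error.
 */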
void ext4_ext_truncate(struct inode * inode, struct page *page)
{
        struct address_space *mapping = inode->i_mapping;
        struct super_block *sb = inode->i_sb;
        unsigned long last_block;
        handle_t *handle;
        int err = 0;

        /*
         * probably the first extent we're gonna free will be the last in the block
         */
        err = ext4_writepage_trans_blocks(inode) + 3;
        handle = ext4_journal_start(inode, err);
        if (IS_ERR(handle)) {
                if (page) {
                        clear_highpage(page);
                        flush_dcache_page(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
                return;
        }

        if (page)
                ext4_block_truncate_page(handle, page, mapping, inode->i_size);

        mutex_lock(&EXT4_I(inode)->truncate_mutex);
        ext4_ext_invalidate_cache(inode);

        /*
         * TODO: optimization is possible here;
         * probably we don't need to scan at all,
         * because page truncation is enough
         */
        if (ext4_orphan_add(handle, inode))
                goto out_stop;

        /* we have to know where to truncate from in the crash case */
        EXT4_I(inode)->i_disksize = inode->i_size;
        ext4_mark_inode_dirty(handle, inode);

        last_block = (inode->i_size + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
        err = ext4_ext_remove_space(inode, last_block);

        /* In a multi-transaction truncate, we only make the final
         * transaction synchronous */
        if (IS_SYNC(inode))
                handle->h_sync = 1;

out_stop:
        /*
         * If this was a simple ftruncate(), and the file will remain alive
         * then we need to clear up the orphan record which we created above.
         * However, if this was a real unlink then we were called by
         * ext4_delete_inode(), and we allow that function to clean up the
         * orphan info for us.
         */
        if (inode->i_nlink)
                ext4_orphan_del(handle, inode);

        mutex_unlock(&EXT4_I(inode)->truncate_mutex);
        ext4_journal_stop(handle);
}
/*
 * this routine calculates the max number of blocks we could modify
 * in order to allocate a new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
{
        int needed;

        needed = ext4_ext_calc_credits_for_insert(inode, NULL);

        /* caller wants to allocate num blocks, but note it includes sb */
        needed = needed * num - (num - 1);

#ifdef CONFIG_QUOTA
        needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

        return needed;
}
EXPORT_SYMBOL(ext4_mark_inode_dirty);
EXPORT_SYMBOL(ext4_ext_invalidate_cache);
EXPORT_SYMBOL(ext4_ext_insert_extent);
EXPORT_SYMBOL(ext4_ext_walk_space);
EXPORT_SYMBOL(ext4_ext_find_goal);
EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);