ext4: Make ext4_ext_find_extent fill ext_path completely
[linux-2.6-omap-h63xx.git] fs/ext4/extents.c
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * Architecture independence:
6  *   Copyright (c) 2005, Bull S.A.
7  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
21  */
22
23 /*
24  * Extents support for EXT4
25  *
26  * TODO:
27  *   - ext4*_error() should be used in some situations
28  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
29  *   - smart tree reduction
30  */
31
32 #include <linux/module.h>
33 #include <linux/fs.h>
34 #include <linux/time.h>
35 #include <linux/jbd2.h>
36 #include <linux/highuid.h>
37 #include <linux/pagemap.h>
38 #include <linux/quotaops.h>
39 #include <linux/string.h>
40 #include <linux/slab.h>
41 #include <linux/falloc.h>
42 #include <asm/uaccess.h>
43 #include "ext4_jbd2.h"
44 #include "ext4_extents.h"
45
46
47 /*
48  * ext_pblock:
49  * combine low and high parts of physical block number into ext4_fsblk_t
50  */
51 static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
52 {
53         ext4_fsblk_t block;
54
55         block = le32_to_cpu(ex->ee_start_lo);
56         block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
57         return block;
58 }
59
60 /*
61  * idx_pblock:
62  * combine low and high parts of a leaf physical block number into ext4_fsblk_t
63  */
64 ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
65 {
66         ext4_fsblk_t block;
67
68         block = le32_to_cpu(ix->ei_leaf_lo);
69         block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
70         return block;
71 }
72
73 /*
74  * ext4_ext_store_pblock:
75  * stores a large physical block number into an extent struct,
76  * breaking it into parts
77  */
78 void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
79 {
80         ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
81         ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
82 }
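
/*
 * Example (purely illustrative): for pb = 0x123456789 the helper above
 * stores ee_start_lo = cpu_to_le32(0x23456789) and ee_start_hi =
 * cpu_to_le16(0x0001), i.e. the low 32 bits and bits 32-47 of the
 * 48-bit physical block number; ext_pblock() reassembles them.
 */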
83
84 /*
85  * ext4_idx_store_pblock:
86  * stores a large physical block number into an index struct,
87  * breaking it into parts
88  */
89 static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
90 {
91         ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
92         ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
93 }
94
95 static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
96 {
97         int err;
98
99         if (handle->h_buffer_credits > needed)
100                 return handle;
101         if (!ext4_journal_extend(handle, needed))
102                 return handle;
103         err = ext4_journal_restart(handle, needed);
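        /* note: the error from ext4_journal_restart() is not checked here */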
104
105         return handle;
106 }
107
108 /*
109  * could return:
110  *  - EROFS
111  *  - ENOMEM
112  */
113 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
114                                 struct ext4_ext_path *path)
115 {
116         if (path->p_bh) {
117                 /* path points to block */
118                 return ext4_journal_get_write_access(handle, path->p_bh);
119         }
120         /* path points to leaf/index in inode body */
121         /* we use in-core data, no need to protect them */
122         return 0;
123 }
124
125 /*
126  * could return:
127  *  - EROFS
128  *  - ENOMEM
129  *  - EIO
130  */
131 static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
132                                 struct ext4_ext_path *path)
133 {
134         int err;
135         if (path->p_bh) {
136                 /* path points to block */
137                 err = ext4_journal_dirty_metadata(handle, path->p_bh);
138         } else {
139                 /* path points to leaf/index in inode body */
140                 err = ext4_mark_inode_dirty(handle, inode);
141         }
142         return err;
143 }
144
145 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
146                               struct ext4_ext_path *path,
147                               ext4_lblk_t block)
148 {
149         struct ext4_inode_info *ei = EXT4_I(inode);
150         ext4_fsblk_t bg_start;
151         ext4_fsblk_t last_block;
152         ext4_grpblk_t colour;
153         int depth;
154
155         if (path) {
156                 struct ext4_extent *ex;
157                 depth = path->p_depth;
158
159                 /* try to predict block placement */
160                 ex = path[depth].p_ext;
161                 if (ex)
162                         return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
163
164                 /* it looks like index is empty;
165                  * try to find starting block from index itself */
166                 if (path[depth].p_bh)
167                         return path[depth].p_bh->b_blocknr;
168         }
169
170         /* OK. use inode's group */
171         bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
172                 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
173         last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
174
175         if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
176                 colour = (current->pid % 16) *
177                         (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
178         else
179                 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
180         return bg_start + colour + block;
181 }
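
/*
 * Illustrative numbers for the fallback above (no extent to predict from):
 * with 32768 blocks per group and current->pid == 1234 (1234 % 16 == 2),
 * colour = 2 * (32768 / 16) = 4096, so the goal lands 4096 blocks into the
 * inode's block group plus the logical block offset.
 */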
182
183 static ext4_fsblk_t
184 ext4_ext_new_block(handle_t *handle, struct inode *inode,
185                         struct ext4_ext_path *path,
186                         struct ext4_extent *ex, int *err)
187 {
188         ext4_fsblk_t goal, newblock;
189
190         goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
191         newblock = ext4_new_block(handle, inode, goal, err);
192         return newblock;
193 }
194
195 static int ext4_ext_space_block(struct inode *inode)
196 {
197         int size;
198
199         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
200                         / sizeof(struct ext4_extent);
201 #ifdef AGGRESSIVE_TEST
202         if (size > 6)
203                 size = 6;
204 #endif
205         return size;
206 }
207
208 static int ext4_ext_space_block_idx(struct inode *inode)
209 {
210         int size;
211
212         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
213                         / sizeof(struct ext4_extent_idx);
214 #ifdef AGGRESSIVE_TEST
215         if (size > 5)
216                 size = 5;
217 #endif
218         return size;
219 }
220
221 static int ext4_ext_space_root(struct inode *inode)
222 {
223         int size;
224
225         size = sizeof(EXT4_I(inode)->i_data);
226         size -= sizeof(struct ext4_extent_header);
227         size /= sizeof(struct ext4_extent);
228 #ifdef AGGRESSIVE_TEST
229         if (size > 3)
230                 size = 3;
231 #endif
232         return size;
233 }
234
235 static int ext4_ext_space_root_idx(struct inode *inode)
236 {
237         int size;
238
239         size = sizeof(EXT4_I(inode)->i_data);
240         size -= sizeof(struct ext4_extent_header);
241         size /= sizeof(struct ext4_extent_idx);
242 #ifdef AGGRESSIVE_TEST
243         if (size > 4)
244                 size = 4;
245 #endif
246         return size;
247 }
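
/*
 * For reference (assuming the usual 12-byte extent header and 12-byte
 * extent/index entries): the in-inode root, i.e. the 60 bytes of i_data,
 * holds up to 4 entries, while a 4KB block holds (4096 - 12) / 12 = 340.
 */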
248
249 static int
250 ext4_ext_max_entries(struct inode *inode, int depth)
251 {
252         int max;
253
254         if (depth == ext_depth(inode)) {
255                 if (depth == 0)
256                         max = ext4_ext_space_root(inode);
257                 else
258                         max = ext4_ext_space_root_idx(inode);
259         } else {
260                 if (depth == 0)
261                         max = ext4_ext_space_block(inode);
262                 else
263                         max = ext4_ext_space_block_idx(inode);
264         }
265
266         return max;
267 }
268
269 static int __ext4_ext_check_header(const char *function, struct inode *inode,
270                                         struct ext4_extent_header *eh,
271                                         int depth)
272 {
273         const char *error_msg;
274         int max = 0;
275
276         if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
277                 error_msg = "invalid magic";
278                 goto corrupted;
279         }
280         if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
281                 error_msg = "unexpected eh_depth";
282                 goto corrupted;
283         }
284         if (unlikely(eh->eh_max == 0)) {
285                 error_msg = "invalid eh_max";
286                 goto corrupted;
287         }
288         max = ext4_ext_max_entries(inode, depth);
289         if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
290                 error_msg = "too large eh_max";
291                 goto corrupted;
292         }
293         if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
294                 error_msg = "invalid eh_entries";
295                 goto corrupted;
296         }
297         return 0;
298
299 corrupted:
300         ext4_error(inode->i_sb, function,
301                         "bad header in inode #%lu: %s - magic %x, "
302                         "entries %u, max %u(%u), depth %u(%u)",
303                         inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
304                         le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
305                         max, le16_to_cpu(eh->eh_depth), depth);
306
307         return -EIO;
308 }
309
310 #define ext4_ext_check_header(inode, eh, depth) \
311         __ext4_ext_check_header(__func__, inode, eh, depth)
312
313 #ifdef EXT_DEBUG
314 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
315 {
316         int k, l = path->p_depth;
317
318         ext_debug("path:");
319         for (k = 0; k <= l; k++, path++) {
320                 if (path->p_idx) {
321                         ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
322                                   idx_pblock(path->p_idx));
323                 } else if (path->p_ext) {
324                         ext_debug("  %d:%d:%llu ",
325                                   le32_to_cpu(path->p_ext->ee_block),
326                                   ext4_ext_get_actual_len(path->p_ext),
327                                   ext_pblock(path->p_ext));
328                 } else
329                         ext_debug("  []");
330         }
331         ext_debug("\n");
332 }
333
334 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
335 {
336         int depth = ext_depth(inode);
337         struct ext4_extent_header *eh;
338         struct ext4_extent *ex;
339         int i;
340
341         if (!path)
342                 return;
343
344         eh = path[depth].p_hdr;
345         ex = EXT_FIRST_EXTENT(eh);
346
347         for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
348                 ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
349                           ext4_ext_get_actual_len(ex), ext_pblock(ex));
350         }
351         ext_debug("\n");
352 }
353 #else
354 #define ext4_ext_show_path(inode,path)
355 #define ext4_ext_show_leaf(inode,path)
356 #endif
357
358 void ext4_ext_drop_refs(struct ext4_ext_path *path)
359 {
360         int depth = path->p_depth;
361         int i;
362
363         for (i = 0; i <= depth; i++, path++)
364                 if (path->p_bh) {
365                         brelse(path->p_bh);
366                         path->p_bh = NULL;
367                 }
368 }
369
370 /*
371  * ext4_ext_binsearch_idx:
372  * binary search for the closest index of the given block
373  * the header must be checked before calling this
374  */
375 static void
376 ext4_ext_binsearch_idx(struct inode *inode,
377                         struct ext4_ext_path *path, ext4_lblk_t block)
378 {
379         struct ext4_extent_header *eh = path->p_hdr;
380         struct ext4_extent_idx *r, *l, *m;
381
382
383         ext_debug("binsearch for %u(idx):  ", block);
384
385         l = EXT_FIRST_INDEX(eh) + 1;
386         r = EXT_LAST_INDEX(eh);
387         while (l <= r) {
388                 m = l + (r - l) / 2;
389                 if (block < le32_to_cpu(m->ei_block))
390                         r = m - 1;
391                 else
392                         l = m + 1;
393                 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
394                                 m, le32_to_cpu(m->ei_block),
395                                 r, le32_to_cpu(r->ei_block));
396         }
397
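        /*
         * the loop above is an upper-bound search over
         * [EXT_FIRST_INDEX + 1, EXT_LAST_INDEX]: on exit l points at the
         * first entry with ei_block > block, so l - 1 is the last entry
         * whose ei_block <= block (falling back to EXT_FIRST_INDEX)
         */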
398         path->p_idx = l - 1;
399         ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
400                   idx_pblock(path->p_idx));
401
402 #ifdef CHECK_BINSEARCH
403         {
404                 struct ext4_extent_idx *chix, *ix;
405                 int k;
406
407                 chix = ix = EXT_FIRST_INDEX(eh);
408                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
409                         if (k != 0 &&
410                             le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
411                                 printk("k=%d, ix=0x%p, first=0x%p\n", k,
412                                         ix, EXT_FIRST_INDEX(eh));
413                                 printk("%u <= %u\n",
414                                        le32_to_cpu(ix->ei_block),
415                                        le32_to_cpu(ix[-1].ei_block));
416                         }
417                         BUG_ON(k && le32_to_cpu(ix->ei_block)
418                                            <= le32_to_cpu(ix[-1].ei_block));
419                         if (block < le32_to_cpu(ix->ei_block))
420                                 break;
421                         chix = ix;
422                 }
423                 BUG_ON(chix != path->p_idx);
424         }
425 #endif
426
427 }
428
429 /*
430  * ext4_ext_binsearch:
431  * binary search for closest extent of the given block
432  * the header must be checked before calling this
433  */
434 static void
435 ext4_ext_binsearch(struct inode *inode,
436                 struct ext4_ext_path *path, ext4_lblk_t block)
437 {
438         struct ext4_extent_header *eh = path->p_hdr;
439         struct ext4_extent *r, *l, *m;
440
441         if (eh->eh_entries == 0) {
442                 /*
443                  * this leaf is empty:
444                  * we get such a leaf in split/add case
445                  */
446                 return;
447         }
448
449         ext_debug("binsearch for %u:  ", block);
450
451         l = EXT_FIRST_EXTENT(eh) + 1;
452         r = EXT_LAST_EXTENT(eh);
453
454         while (l <= r) {
455                 m = l + (r - l) / 2;
456                 if (block < le32_to_cpu(m->ee_block))
457                         r = m - 1;
458                 else
459                         l = m + 1;
460                 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
461                                 m, le32_to_cpu(m->ee_block),
462                                 r, le32_to_cpu(r->ee_block));
463         }
464
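        /* same upper-bound search as in ext4_ext_binsearch_idx():
         * l - 1 is the last extent whose ee_block <= block */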
465         path->p_ext = l - 1;
466         ext_debug("  -> %d:%llu:%d ",
467                         le32_to_cpu(path->p_ext->ee_block),
468                         ext_pblock(path->p_ext),
469                         ext4_ext_get_actual_len(path->p_ext));
470
471 #ifdef CHECK_BINSEARCH
472         {
473                 struct ext4_extent *chex, *ex;
474                 int k;
475
476                 chex = ex = EXT_FIRST_EXTENT(eh);
477                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
478                         BUG_ON(k && le32_to_cpu(ex->ee_block)
479                                           <= le32_to_cpu(ex[-1].ee_block));
480                         if (block < le32_to_cpu(ex->ee_block))
481                                 break;
482                         chex = ex;
483                 }
484                 BUG_ON(chex != path->p_ext);
485         }
486 #endif
487
488 }
489
490 int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
491 {
492         struct ext4_extent_header *eh;
493
494         eh = ext_inode_hdr(inode);
495         eh->eh_depth = 0;
496         eh->eh_entries = 0;
497         eh->eh_magic = EXT4_EXT_MAGIC;
498         eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
499         ext4_mark_inode_dirty(handle, inode);
500         ext4_ext_invalidate_cache(inode);
501         return 0;
502 }
503
504 struct ext4_ext_path *
505 ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
506                                         struct ext4_ext_path *path)
507 {
508         struct ext4_extent_header *eh;
509         struct buffer_head *bh;
510         short int depth, i, ppos = 0, alloc = 0;
511
512         eh = ext_inode_hdr(inode);
513         depth = ext_depth(inode);
514         if (ext4_ext_check_header(inode, eh, depth))
515                 return ERR_PTR(-EIO);
516
517
518         /* account possible depth increase */
519         if (!path) {
520                 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
521                                 GFP_NOFS);
522                 if (!path)
523                         return ERR_PTR(-ENOMEM);
524                 alloc = 1;
525         }
526         path[0].p_hdr = eh;
527         path[0].p_bh = NULL;
528
529         i = depth;
530         /* walk through the tree */
531         while (i) {
532                 ext_debug("depth %d: num %d, max %d\n",
533                           ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
534
535                 ext4_ext_binsearch_idx(inode, path + ppos, block);
536                 path[ppos].p_block = idx_pblock(path[ppos].p_idx);
537                 path[ppos].p_depth = i;
538                 path[ppos].p_ext = NULL;
539
540                 bh = sb_bread(inode->i_sb, path[ppos].p_block);
541                 if (!bh)
542                         goto err;
543
544                 eh = ext_block_hdr(bh);
545                 ppos++;
546                 BUG_ON(ppos > depth);
547                 path[ppos].p_bh = bh;
548                 path[ppos].p_hdr = eh;
549                 i--;
550
551                 if (ext4_ext_check_header(inode, eh, i))
552                         goto err;
553         }
554
555         path[ppos].p_depth = i;
556         path[ppos].p_ext = NULL;
557         path[ppos].p_idx = NULL;
558
559         /* find extent */
560         ext4_ext_binsearch(inode, path + ppos, block);
561         /* if not an empty leaf */
562         if (path[ppos].p_ext)
563                 path[ppos].p_block = ext_pblock(path[ppos].p_ext);
564
565         ext4_ext_show_path(inode, path);
566
567         return path;
568
569 err:
570         ext4_ext_drop_refs(path);
571         if (alloc)
572                 kfree(path);
573         return ERR_PTR(-EIO);
574 }
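
/*
 * Typical call pattern (illustrative sketch, mirroring the callers below):
 *
 *	path = ext4_ext_find_extent(inode, block, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 */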
575
576 /*
577  * ext4_ext_insert_index:
578  * insert new index [@logical;@ptr] into the block at @curp;
579  * check where to insert: before @curp or after @curp
580  */
581 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
582                                 struct ext4_ext_path *curp,
583                                 int logical, ext4_fsblk_t ptr)
584 {
585         struct ext4_extent_idx *ix;
586         int len, err;
587
588         err = ext4_ext_get_access(handle, inode, curp);
589         if (err)
590                 return err;
591
592         BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
593         len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
594         if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
595                 /* insert after */
596                 if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
597                         len = (len - 1) * sizeof(struct ext4_extent_idx);
598                         len = len < 0 ? 0 : len;
599                         ext_debug("insert new index %d after: %llu. "
600                                         "move %d from 0x%p to 0x%p\n",
601                                         logical, ptr, len,
602                                         (curp->p_idx + 1), (curp->p_idx + 2));
603                         memmove(curp->p_idx + 2, curp->p_idx + 1, len);
604                 }
605                 ix = curp->p_idx + 1;
606         } else {
607                 /* insert before */
608                 len = len * sizeof(struct ext4_extent_idx);
609                 len = len < 0 ? 0 : len;
610                 ext_debug("insert new index %d before: %llu. "
611                                 "move %d from 0x%p to 0x%p\n",
612                                 logical, ptr, len,
613                                 curp->p_idx, (curp->p_idx + 1));
614                 memmove(curp->p_idx + 1, curp->p_idx, len);
615                 ix = curp->p_idx;
616         }
617
618         ix->ei_block = cpu_to_le32(logical);
619         ext4_idx_store_pblock(ix, ptr);
620         le16_add_cpu(&curp->p_hdr->eh_entries, 1);
621
622         BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
623                              > le16_to_cpu(curp->p_hdr->eh_max));
624         BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
625
626         err = ext4_ext_dirty(handle, inode, curp);
627         ext4_std_error(inode->i_sb, err);
628
629         return err;
630 }
631
632 /*
633  * ext4_ext_split:
634  * inserts new subtree into the path, using free index entry
635  * at depth @at:
636  * - allocates all needed blocks (new leaf and all intermediate index blocks)
637  * - makes decision where to split
638  * - moves remaining extents and index entries (right to the split point)
639  *   into the newly allocated blocks
640  * - initializes subtree
641  */
642 static int ext4_ext_split(handle_t *handle, struct inode *inode,
643                                 struct ext4_ext_path *path,
644                                 struct ext4_extent *newext, int at)
645 {
646         struct buffer_head *bh = NULL;
647         int depth = ext_depth(inode);
648         struct ext4_extent_header *neh;
649         struct ext4_extent_idx *fidx;
650         struct ext4_extent *ex;
651         int i = at, k, m, a;
652         ext4_fsblk_t newblock, oldblock;
653         __le32 border;
654         ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
655         int err = 0;
656
657         /* make decision: where to split? */
658         /* FIXME: now decision is simplest: at current extent */
659
660         /* if the current leaf is going to be split, then we should use
661          * the border from the split point */
662         BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
663         if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
664                 border = path[depth].p_ext[1].ee_block;
665                 ext_debug("leaf will be split."
666                                 " next leaf starts at %d\n",
667                                   le32_to_cpu(border));
668         } else {
669                 border = newext->ee_block;
670                 ext_debug("leaf will be added."
671                                 " next leaf starts at %d\n",
672                                 le32_to_cpu(border));
673         }
674
675         /*
676          * If an error occurs, we stop processing and mark the
677          * filesystem read-only. The index won't be inserted and the
678          * tree will stay in a consistent state. The next mount will
679          * repair the buffers too.
680          */
681
682         /*
683          * Get array to track all allocated blocks.
684          * We need this to handle errors and free the blocks
685          * on failure.
686          */
687         ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
688         if (!ablocks)
689                 return -ENOMEM;
690
691         /* allocate all needed blocks */
692         ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
693         for (a = 0; a < depth - at; a++) {
694                 newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
695                 if (newblock == 0)
696                         goto cleanup;
697                 ablocks[a] = newblock;
698         }
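
        /* on success a == depth - at: ablocks[a - 1] becomes the new leaf
         * below, and the remaining blocks become index blocks, built from
         * the leaf upwards */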
699
700         /* initialize new leaf */
701         newblock = ablocks[--a];
702         BUG_ON(newblock == 0);
703         bh = sb_getblk(inode->i_sb, newblock);
704         if (!bh) {
705                 err = -EIO;
706                 goto cleanup;
707         }
708         lock_buffer(bh);
709
710         err = ext4_journal_get_create_access(handle, bh);
711         if (err)
712                 goto cleanup;
713
714         neh = ext_block_hdr(bh);
715         neh->eh_entries = 0;
716         neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
717         neh->eh_magic = EXT4_EXT_MAGIC;
718         neh->eh_depth = 0;
719         ex = EXT_FIRST_EXTENT(neh);
720
721         /* move remainder of path[depth] to the new leaf */
722         BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
723         /* start copy from next extent */
724         /* TODO: we could do it by single memmove */
725         m = 0;
726         path[depth].p_ext++;
727         while (path[depth].p_ext <=
728                         EXT_MAX_EXTENT(path[depth].p_hdr)) {
729                 ext_debug("move %d:%llu:%d in new leaf %llu\n",
730                                 le32_to_cpu(path[depth].p_ext->ee_block),
731                                 ext_pblock(path[depth].p_ext),
732                                 ext4_ext_get_actual_len(path[depth].p_ext),
733                                 newblock);
734                 /*memmove(ex++, path[depth].p_ext++,
735                                 sizeof(struct ext4_extent));
736                 neh->eh_entries++;*/
737                 path[depth].p_ext++;
738                 m++;
739         }
740         if (m) {
741                 memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
742                 le16_add_cpu(&neh->eh_entries, m);
743         }
744
745         set_buffer_uptodate(bh);
746         unlock_buffer(bh);
747
748         err = ext4_journal_dirty_metadata(handle, bh);
749         if (err)
750                 goto cleanup;
751         brelse(bh);
752         bh = NULL;
753
754         /* correct old leaf */
755         if (m) {
756                 err = ext4_ext_get_access(handle, inode, path + depth);
757                 if (err)
758                         goto cleanup;
759                 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
760                 err = ext4_ext_dirty(handle, inode, path + depth);
761                 if (err)
762                         goto cleanup;
763
764         }
765
766         /* create intermediate indexes */
767         k = depth - at - 1;
768         BUG_ON(k < 0);
769         if (k)
770                 ext_debug("create %d intermediate indices\n", k);
771         /* insert new index into current index block */
772         /* current depth stored in i var */
773         i = depth - 1;
774         while (k--) {
775                 oldblock = newblock;
776                 newblock = ablocks[--a];
777                 bh = sb_getblk(inode->i_sb, newblock);
778                 if (!bh) {
779                         err = -EIO;
780                         goto cleanup;
781                 }
782                 lock_buffer(bh);
783
784                 err = ext4_journal_get_create_access(handle, bh);
785                 if (err)
786                         goto cleanup;
787
788                 neh = ext_block_hdr(bh);
789                 neh->eh_entries = cpu_to_le16(1);
790                 neh->eh_magic = EXT4_EXT_MAGIC;
791                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
792                 neh->eh_depth = cpu_to_le16(depth - i);
793                 fidx = EXT_FIRST_INDEX(neh);
794                 fidx->ei_block = border;
795                 ext4_idx_store_pblock(fidx, oldblock);
796
797                 ext_debug("int.index at %d (block %llu): %u -> %llu\n",
798                                 i, newblock, le32_to_cpu(border), oldblock);
799                 /* copy indexes */
800                 m = 0;
801                 path[i].p_idx++;
802
803                 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
804                                 EXT_MAX_INDEX(path[i].p_hdr));
805                 BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
806                                 EXT_LAST_INDEX(path[i].p_hdr));
807                 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
808                         ext_debug("%d: move %d:%llu in new index %llu\n", i,
809                                         le32_to_cpu(path[i].p_idx->ei_block),
810                                         idx_pblock(path[i].p_idx),
811                                         newblock);
812                         /*memmove(++fidx, path[i].p_idx++,
813                                         sizeof(struct ext4_extent_idx));
814                         neh->eh_entries++;
815                         BUG_ON(neh->eh_entries > neh->eh_max);*/
816                         path[i].p_idx++;
817                         m++;
818                 }
819                 if (m) {
820                         memmove(++fidx, path[i].p_idx - m,
821                                 sizeof(struct ext4_extent_idx) * m);
822                         le16_add_cpu(&neh->eh_entries, m);
823                 }
824                 set_buffer_uptodate(bh);
825                 unlock_buffer(bh);
826
827                 err = ext4_journal_dirty_metadata(handle, bh);
828                 if (err)
829                         goto cleanup;
830                 brelse(bh);
831                 bh = NULL;
832
833                 /* correct old index */
834                 if (m) {
835                         err = ext4_ext_get_access(handle, inode, path + i);
836                         if (err)
837                                 goto cleanup;
838                         le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
839                         err = ext4_ext_dirty(handle, inode, path + i);
840                         if (err)
841                                 goto cleanup;
842                 }
843
844                 i--;
845         }
846
847         /* insert new index */
848         err = ext4_ext_insert_index(handle, inode, path + at,
849                                     le32_to_cpu(border), newblock);
850
851 cleanup:
852         if (bh) {
853                 if (buffer_locked(bh))
854                         unlock_buffer(bh);
855                 brelse(bh);
856         }
857
858         if (err) {
859                 /* free all allocated blocks in error case */
860                 for (i = 0; i < depth; i++) {
861                         if (!ablocks[i])
862                                 continue;
863                         ext4_free_blocks(handle, inode, ablocks[i], 1, 1);
864                 }
865         }
866         kfree(ablocks);
867
868         return err;
869 }
870
871 /*
872  * ext4_ext_grow_indepth:
873  * implements tree growing procedure:
874  * - allocates new block
875  * - moves top-level data (index block or leaf) into the new block
876  * - initializes new top-level, creating index that points to the
877  *   just created block
878  */
879 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
880                                         struct ext4_ext_path *path,
881                                         struct ext4_extent *newext)
882 {
883         struct ext4_ext_path *curp = path;
884         struct ext4_extent_header *neh;
885         struct ext4_extent_idx *fidx;
886         struct buffer_head *bh;
887         ext4_fsblk_t newblock;
888         int err = 0;
889
890         newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
891         if (newblock == 0)
892                 return err;
893
894         bh = sb_getblk(inode->i_sb, newblock);
895         if (!bh) {
896                 err = -EIO;
897                 ext4_std_error(inode->i_sb, err);
898                 return err;
899         }
900         lock_buffer(bh);
901
902         err = ext4_journal_get_create_access(handle, bh);
903         if (err) {
904                 unlock_buffer(bh);
905                 goto out;
906         }
907
908         /* move top-level index/leaf into new block */
909         memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
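        /* the extent tree root lives in the inode's i_data[] array, so
         * copying sizeof(i_data) bytes moves the whole root, header and
         * entries included */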
910
911         /* set size of new block */
912         neh = ext_block_hdr(bh);
913         /* the old root could have indexes or leaves,
914          * so calculate eh_max the right way */
915         if (ext_depth(inode))
916                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
917         else
918                 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
919         neh->eh_magic = EXT4_EXT_MAGIC;
920         set_buffer_uptodate(bh);
921         unlock_buffer(bh);
922
923         err = ext4_journal_dirty_metadata(handle, bh);
924         if (err)
925                 goto out;
926
927         /* create index in new top-level index: num,max,pointer */
928         err = ext4_ext_get_access(handle, inode, curp);
929         if (err)
930                 goto out;
931
932         curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
933         curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
934         curp->p_hdr->eh_entries = cpu_to_le16(1);
935         curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
936
937         if (path[0].p_hdr->eh_depth)
938                 curp->p_idx->ei_block =
939                         EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
940         else
941                 curp->p_idx->ei_block =
942                         EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
943         ext4_idx_store_pblock(curp->p_idx, newblock);
944
945         neh = ext_inode_hdr(inode);
946         fidx = EXT_FIRST_INDEX(neh);
947         ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
948                   le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
949                   le32_to_cpu(fidx->ei_block), idx_pblock(fidx));
950
951         neh->eh_depth = cpu_to_le16(path->p_depth + 1);
952         err = ext4_ext_dirty(handle, inode, curp);
953 out:
954         brelse(bh);
955
956         return err;
957 }
958
959 /*
960  * ext4_ext_create_new_leaf:
961  * finds an empty index and adds a new leaf.
962  * if no free index is found, then it requests growing the tree in depth.
963  */
964 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
965                                         struct ext4_ext_path *path,
966                                         struct ext4_extent *newext)
967 {
968         struct ext4_ext_path *curp;
969         int depth, i, err = 0;
970
971 repeat:
972         i = depth = ext_depth(inode);
973
974         /* walk up the tree and look for a free index entry */
975         curp = path + depth;
976         while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
977                 i--;
978                 curp--;
979         }
980
981         /* we use already allocated block for index block,
982          * so subsequent data blocks should be contiguous */
983         if (EXT_HAS_FREE_INDEX(curp)) {
984                 /* if we found index with free entry, then use that
985                  * entry: create all needed subtree and add new leaf */
986                 err = ext4_ext_split(handle, inode, path, newext, i);
987                 if (err)
988                         goto out;
989
990                 /* refill path */
991                 ext4_ext_drop_refs(path);
992                 path = ext4_ext_find_extent(inode,
993                                     (ext4_lblk_t)le32_to_cpu(newext->ee_block),
994                                     path);
995                 if (IS_ERR(path))
996                         err = PTR_ERR(path);
997         } else {
998                 /* tree is full, time to grow in depth */
999                 err = ext4_ext_grow_indepth(handle, inode, path, newext);
1000                 if (err)
1001                         goto out;
1002
1003                 /* refill path */
1004                 ext4_ext_drop_refs(path);
1005                 path = ext4_ext_find_extent(inode,
1006                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1007                                     path);
1008                 if (IS_ERR(path)) {
1009                         err = PTR_ERR(path);
1010                         goto out;
1011                 }
1012
1013                 /*
1014                  * only first (depth 0 -> 1) produces free space;
1015                  * in all other cases we have to split the grown tree
1016                  */
1017                 depth = ext_depth(inode);
1018                 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1019                         /* now we need to split */
1020                         goto repeat;
1021                 }
1022         }
1023
1024 out:
1025         return err;
1026 }
1027
1028 /*
1029  * search the closest allocated block to the left of *logical
1030  * and returns it at @logical + its physical address at @phys
1031  * if *logical is the smallest allocated block, the function
1032  * returns 0 at @phys
1033  * return value contains 0 (success) or error code
1034  */
1035 int
1036 ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
1037                         ext4_lblk_t *logical, ext4_fsblk_t *phys)
1038 {
1039         struct ext4_extent_idx *ix;
1040         struct ext4_extent *ex;
1041         int depth, ee_len;
1042
1043         BUG_ON(path == NULL);
1044         depth = path->p_depth;
1045         *phys = 0;
1046
1047         if (depth == 0 && path->p_ext == NULL)
1048                 return 0;
1049
1050         /* usually the extent in the path covers blocks smaller
1051          * than *logical, but it can happen that the extent is the
1052          * first one in the file */
1053
1054         ex = path[depth].p_ext;
1055         ee_len = ext4_ext_get_actual_len(ex);
1056         if (*logical < le32_to_cpu(ex->ee_block)) {
1057                 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
1058                 while (--depth >= 0) {
1059                         ix = path[depth].p_idx;
1060                         BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
1061                 }
1062                 return 0;
1063         }
1064
1065         BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
1066
1067         *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1068         *phys = ext_pblock(ex) + ee_len - 1;
1069         return 0;
1070 }
1071
1072 /*
1073  * search the closest allocated block to the right of *logical
1074  * and returns it at @logical + its physical address at @phys
1075  * if there is no allocated block to the right of *logical, the function
1076  * returns 0 at @phys
1077  * return value contains 0 (success) or error code
1078  */
1079 int
1080 ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1081                         ext4_lblk_t *logical, ext4_fsblk_t *phys)
1082 {
1083         struct buffer_head *bh = NULL;
1084         struct ext4_extent_header *eh;
1085         struct ext4_extent_idx *ix;
1086         struct ext4_extent *ex;
1087         ext4_fsblk_t block;
1088         int depth, ee_len;
1089
1090         BUG_ON(path == NULL);
1091         depth = path->p_depth;
1092         *phys = 0;
1093
1094         if (depth == 0 && path->p_ext == NULL)
1095                 return 0;
1096
1097         /* usually the extent in the path covers blocks smaller
1098          * than *logical, but it can happen that the extent is the
1099          * first one in the file */
1100
1101         ex = path[depth].p_ext;
1102         ee_len = ext4_ext_get_actual_len(ex);
1103         if (*logical < le32_to_cpu(ex->ee_block)) {
1104                 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
1105                 while (--depth >= 0) {
1106                         ix = path[depth].p_idx;
1107                         BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
1108                 }
1109                 *logical = le32_to_cpu(ex->ee_block);
1110                 *phys = ext_pblock(ex);
1111                 return 0;
1112         }
1113
1114         BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
1115
1116         if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1117                 /* next allocated block in this leaf */
1118                 ex++;
1119                 *logical = le32_to_cpu(ex->ee_block);
1120                 *phys = ext_pblock(ex);
1121                 return 0;
1122         }
1123
1124         /* go up and search for index to the right */
1125         while (--depth >= 0) {
1126                 ix = path[depth].p_idx;
1127                 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1128                         break;
1129         }
1130
1131         if (depth < 0) {
1132                 /* we've gone up to the root and
1133                  * found no index to the right */
1134                 return 0;
1135         }
1136
1137         /* we've found index to the right, let's
1138          * follow it and find the closest allocated
1139          * block to the right */
1140         ix++;
1141         block = idx_pblock(ix);
1142         while (++depth < path->p_depth) {
1143                 bh = sb_bread(inode->i_sb, block);
1144                 if (bh == NULL)
1145                         return -EIO;
1146                 eh = ext_block_hdr(bh);
1147                 if (ext4_ext_check_header(inode, eh, depth)) {
1148                         put_bh(bh);
1149                         return -EIO;
1150                 }
1151                 ix = EXT_FIRST_INDEX(eh);
1152                 block = idx_pblock(ix);
1153                 put_bh(bh);
1154         }
1155
1156         bh = sb_bread(inode->i_sb, block);
1157         if (bh == NULL)
1158                 return -EIO;
1159         eh = ext_block_hdr(bh);
1160         if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
1161                 put_bh(bh);
1162                 return -EIO;
1163         }
1164         ex = EXT_FIRST_EXTENT(eh);
1165         *logical = le32_to_cpu(ex->ee_block);
1166         *phys = ext_pblock(ex);
1167         put_bh(bh);
1168         return 0;
1169
1170 }
1171
1172 /*
1173  * ext4_ext_next_allocated_block:
1174  * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
1175  * NOTE: it considers block number from index entry as
1176  * allocated block. Thus, index entries have to be consistent
1177  * with leaves.
1178  */
1179 static ext4_lblk_t
1180 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1181 {
1182         int depth;
1183
1184         BUG_ON(path == NULL);
1185         depth = path->p_depth;
1186
1187         if (depth == 0 && path->p_ext == NULL)
1188                 return EXT_MAX_BLOCK;
1189
1190         while (depth >= 0) {
1191                 if (depth == path->p_depth) {
1192                         /* leaf */
1193                         if (path[depth].p_ext !=
1194                                         EXT_LAST_EXTENT(path[depth].p_hdr))
1195                                 return le32_to_cpu(path[depth].p_ext[1].ee_block);
1196                 } else {
1197                         /* index */
1198                         if (path[depth].p_idx !=
1199                                         EXT_LAST_INDEX(path[depth].p_hdr))
1200                                 return le32_to_cpu(path[depth].p_idx[1].ei_block);
1201                 }
1202                 depth--;
1203         }
1204
1205         return EXT_MAX_BLOCK;
1206 }
1207
1208 /*
1209  * ext4_ext_next_leaf_block:
1210  * returns first allocated block from next leaf or EXT_MAX_BLOCK
1211  */
1212 static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1213                                         struct ext4_ext_path *path)
1214 {
1215         int depth;
1216
1217         BUG_ON(path == NULL);
1218         depth = path->p_depth;
1219
1220         /* zero-tree has no leaf blocks at all */
1221         if (depth == 0)
1222                 return EXT_MAX_BLOCK;
1223
1224         /* go to index block */
1225         depth--;
1226
1227         while (depth >= 0) {
1228                 if (path[depth].p_idx !=
1229                                 EXT_LAST_INDEX(path[depth].p_hdr))
1230                         return (ext4_lblk_t)
1231                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
1232                 depth--;
1233         }
1234
1235         return EXT_MAX_BLOCK;
1236 }
1237
1238 /*
1239  * ext4_ext_correct_indexes:
1240  * if leaf gets modified and modified extent is first in the leaf,
1241  * then we have to correct all indexes above.
1242  * TODO: do we need to correct tree in all cases?
1243  */
1244 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1245                                 struct ext4_ext_path *path)
1246 {
1247         struct ext4_extent_header *eh;
1248         int depth = ext_depth(inode);
1249         struct ext4_extent *ex;
1250         __le32 border;
1251         int k, err = 0;
1252
1253         eh = path[depth].p_hdr;
1254         ex = path[depth].p_ext;
1255         BUG_ON(ex == NULL);
1256         BUG_ON(eh == NULL);
1257
1258         if (depth == 0) {
1259                 /* there is no tree at all */
1260                 return 0;
1261         }
1262
1263         if (ex != EXT_FIRST_EXTENT(eh)) {
1264                 /* correct the tree only if the leaf's first extent was modified */
1265                 return 0;
1266         }
1267
1268         /*
1269          * TODO: we need correction if border is smaller than current one
1270          */
1271         k = depth - 1;
1272         border = path[depth].p_ext->ee_block;
1273         err = ext4_ext_get_access(handle, inode, path + k);
1274         if (err)
1275                 return err;
1276         path[k].p_idx->ei_block = border;
1277         err = ext4_ext_dirty(handle, inode, path + k);
1278         if (err)
1279                 return err;
1280
1281         while (k--) {
1282                 /* change all left-side indexes */
1283                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1284                         break;
1285                 err = ext4_ext_get_access(handle, inode, path + k);
1286                 if (err)
1287                         break;
1288                 path[k].p_idx->ei_block = border;
1289                 err = ext4_ext_dirty(handle, inode, path + k);
1290                 if (err)
1291                         break;
1292         }
1293
1294         return err;
1295 }
1296
1297 static int
1298 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1299                                 struct ext4_extent *ex2)
1300 {
1301         unsigned short ext1_ee_len, ext2_ee_len, max_len;
1302
1303         /*
1304          * Make sure that either both extents are uninitialized, or
1305          * both are _not_.
1306          */
1307         if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1308                 return 0;
1309
1310         if (ext4_ext_is_uninitialized(ex1))
1311                 max_len = EXT_UNINIT_MAX_LEN;
1312         else
1313                 max_len = EXT_INIT_MAX_LEN;
1314
1315         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1316         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1317
1318         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1319                         le32_to_cpu(ex2->ee_block))
1320                 return 0;
1321
1322         /*
1323          * To allow future support for preallocated extents to be added
1324  * as an RO_COMPAT feature, refuse to merge two extents if
1325          * this can result in the top bit of ee_len being set.
1326          */
1327         if (ext1_ee_len + ext2_ee_len > max_len)
1328                 return 0;
1329 #ifdef AGGRESSIVE_TEST
1330         if (ext1_ee_len >= 4)
1331                 return 0;
1332 #endif
1333
1334         if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
1335                 return 1;
1336         return 0;
1337 }
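
/*
 * Illustrative example: with AGGRESSIVE_TEST disabled, an initialized extent
 * [logical 100, len 8, physical 500] can be merged with [logical 108, len 8,
 * physical 508], since both the logical and the physical ranges are
 * contiguous and the combined length 16 stays below EXT_INIT_MAX_LEN.
 */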
1338
1339 /*
1340  * This function tries to merge the "ex" extent to the next extent in the tree.
1341  * It always tries to merge towards right. If you want to merge towards
1342  * left, pass "ex - 1" as argument instead of "ex".
1343  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1344  * 1 if they got merged.
1345  */
1346 int ext4_ext_try_to_merge(struct inode *inode,
1347                           struct ext4_ext_path *path,
1348                           struct ext4_extent *ex)
1349 {
1350         struct ext4_extent_header *eh;
1351         unsigned int depth, len;
1352         int merge_done = 0;
1353         int uninitialized = 0;
1354
1355         depth = ext_depth(inode);
1356         BUG_ON(path[depth].p_hdr == NULL);
1357         eh = path[depth].p_hdr;
1358
1359         while (ex < EXT_LAST_EXTENT(eh)) {
1360                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1361                         break;
1362                 /* merge with next extent! */
1363                 if (ext4_ext_is_uninitialized(ex))
1364                         uninitialized = 1;
1365                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1366                                 + ext4_ext_get_actual_len(ex + 1));
1367                 if (uninitialized)
1368                         ext4_ext_mark_uninitialized(ex);
1369
1370                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1371                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1372                                 * sizeof(struct ext4_extent);
1373                         memmove(ex + 1, ex + 2, len);
1374                 }
1375                 le16_add_cpu(&eh->eh_entries, -1);
1376                 merge_done = 1;
1377                 WARN_ON(eh->eh_entries == 0);
1378                 if (!eh->eh_entries)
1379                         ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
1380                            "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
1381         }
1382
1383         return merge_done;
1384 }
1385
1386 /*
1387  * check if a portion of the "newext" extent overlaps with an
1388  * existing extent.
1389  *
1390  * If there is an overlap discovered, it updates the length of the newext
1391  * such that there will be no overlap, and then returns 1.
1392  * If there is no overlap found, it returns 0.
1393  */
1394 unsigned int ext4_ext_check_overlap(struct inode *inode,
1395                                     struct ext4_extent *newext,
1396                                     struct ext4_ext_path *path)
1397 {
1398         ext4_lblk_t b1, b2;
1399         unsigned int depth, len1;
1400         unsigned int ret = 0;
1401
1402         b1 = le32_to_cpu(newext->ee_block);
1403         len1 = ext4_ext_get_actual_len(newext);
1404         depth = ext_depth(inode);
1405         if (!path[depth].p_ext)
1406                 goto out;
1407         b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1408
1409         /*
1410          * get the next allocated block if the extent in the path
1411          * is before the requested block(s) 
1412          */
1413         if (b2 < b1) {
1414                 b2 = ext4_ext_next_allocated_block(path);
1415                 if (b2 == EXT_MAX_BLOCK)
1416                         goto out;
1417         }
1418
1419         /* check for wrap through zero on extent logical start block*/
1420         if (b1 + len1 < b1) {
1421                 len1 = EXT_MAX_BLOCK - b1;
1422                 newext->ee_len = cpu_to_le16(len1);
1423                 ret = 1;
1424         }
1425
1426         /* check for overlap */
1427         if (b1 + len1 > b2) {
1428                 newext->ee_len = cpu_to_le16(b2 - b1);
1429                 ret = 1;
1430         }
1431 out:
1432         return ret;
1433 }
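
/*
 * Illustrative example: a newext starting at block 200 with len 50, with an
 * existing extent starting at block 230, gets trimmed to len 30 (230 - 200)
 * and the function returns 1.
 */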
1434
1435 /*
1436  * ext4_ext_insert_extent:
1437  * tries to merge the requested extent into the existing extent or
1438  * inserts requested extent as new one into the tree,
1439  * creating new leaf in the no-space case.
1440  */
1441 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1442                                 struct ext4_ext_path *path,
1443                                 struct ext4_extent *newext)
1444 {
1445         struct ext4_extent_header * eh;
1446         struct ext4_extent *ex, *fex;
1447         struct ext4_extent *nearex; /* nearest extent */
1448         struct ext4_ext_path *npath = NULL;
1449         int depth, len, err;
1450         ext4_lblk_t next;
1451         unsigned uninitialized = 0;
1452
1453         BUG_ON(ext4_ext_get_actual_len(newext) == 0);
1454         depth = ext_depth(inode);
1455         ex = path[depth].p_ext;
1456         BUG_ON(path[depth].p_hdr == NULL);
1457
1458         /* try to insert block into found extent and return */
1459         if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
1460                 ext_debug("append %d block to %d:%d (from %llu)\n",
1461                                 ext4_ext_get_actual_len(newext),
1462                                 le32_to_cpu(ex->ee_block),
1463                                 ext4_ext_get_actual_len(ex), ext_pblock(ex));
1464                 err = ext4_ext_get_access(handle, inode, path + depth);
1465                 if (err)
1466                         return err;
1467
1468                 /*
1469                  * ext4_can_extents_be_merged should have checked that either
1470                  * both extents are uninitialized, or both aren't. Thus we
1471                  * need to check only one of them here.
1472                  */
1473                 if (ext4_ext_is_uninitialized(ex))
1474                         uninitialized = 1;
1475                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1476                                         + ext4_ext_get_actual_len(newext));
1477                 if (uninitialized)
1478                         ext4_ext_mark_uninitialized(ex);
1479                 eh = path[depth].p_hdr;
1480                 nearex = ex;
1481                 goto merge;
1482         }
1483
1484 repeat:
1485         depth = ext_depth(inode);
1486         eh = path[depth].p_hdr;
1487         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1488                 goto has_space;
1489
1490         /* probably next leaf has space for us? */
1491         fex = EXT_LAST_EXTENT(eh);
1492         next = ext4_ext_next_leaf_block(inode, path);
1493         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1494             && next != EXT_MAX_BLOCK) {
1495                 ext_debug("next leaf block - %d\n", next);
1496                 BUG_ON(npath != NULL);
1497                 npath = ext4_ext_find_extent(inode, next, NULL);
1498                 if (IS_ERR(npath))
1499                         return PTR_ERR(npath);
1500                 BUG_ON(npath->p_depth != path->p_depth);
1501                 eh = npath[depth].p_hdr;
1502                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1503                         ext_debug("next leaf isn't full(%d)\n",
1504                                   le16_to_cpu(eh->eh_entries));
1505                         path = npath;
1506                         goto repeat;
1507                 }
1508                 ext_debug("next leaf has no free space(%d,%d)\n",
1509                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1510         }
1511
1512         /*
1513          * There is no free space in the found leaf.
1514          * We're going to add a new leaf to the tree.
1515          */
1516         err = ext4_ext_create_new_leaf(handle, inode, path, newext);
1517         if (err)
1518                 goto cleanup;
1519         depth = ext_depth(inode);
1520         eh = path[depth].p_hdr;
1521
1522 has_space:
1523         nearex = path[depth].p_ext;
1524
1525         err = ext4_ext_get_access(handle, inode, path + depth);
1526         if (err)
1527                 goto cleanup;
1528
1529         if (!nearex) {
1530                 /* there is no extent in this leaf, create first one */
1531                 ext_debug("first extent in the leaf: %d:%llu:%d\n",
1532                                 le32_to_cpu(newext->ee_block),
1533                                 ext_pblock(newext),
1534                                 ext4_ext_get_actual_len(newext));
1535                 path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1536         } else if (le32_to_cpu(newext->ee_block)
1537                            > le32_to_cpu(nearex->ee_block)) {
1538 /*              BUG_ON(newext->ee_block == nearex->ee_block); */
1539                 if (nearex != EXT_LAST_EXTENT(eh)) {
1540                         len = EXT_MAX_EXTENT(eh) - nearex;
1541                         len = (len - 1) * sizeof(struct ext4_extent);
1542                         len = len < 0 ? 0 : len;
1543                         ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
1544                                         "move %d from 0x%p to 0x%p\n",
1545                                         le32_to_cpu(newext->ee_block),
1546                                         ext_pblock(newext),
1547                                         ext4_ext_get_actual_len(newext),
1548                                         nearex, len, nearex + 1, nearex + 2);
1549                         memmove(nearex + 2, nearex + 1, len);
1550                 }
1551                 path[depth].p_ext = nearex + 1;
1552         } else {
1553                 BUG_ON(newext->ee_block == nearex->ee_block);
1554                 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1555                 len = len < 0 ? 0 : len;
1556                 ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
1557                                 "move %d from 0x%p to 0x%p\n",
1558                                 le32_to_cpu(newext->ee_block),
1559                                 ext_pblock(newext),
1560                                 ext4_ext_get_actual_len(newext),
1561                                 nearex, len, nearex + 1, nearex + 2);
1562                 memmove(nearex + 1, nearex, len);
1563                 path[depth].p_ext = nearex;
1564         }
1565
1566         le16_add_cpu(&eh->eh_entries, 1);
1567         nearex = path[depth].p_ext;
1568         nearex->ee_block = newext->ee_block;
1569         ext4_ext_store_pblock(nearex, ext_pblock(newext));
1570         nearex->ee_len = newext->ee_len;
1571
1572 merge:
1573         /* try to merge extents to the right */
1574         ext4_ext_try_to_merge(inode, path, nearex);
1575
1576         /* try to merge extents to the left */
1577
1578         /* time to correct all indexes above */
1579         err = ext4_ext_correct_indexes(handle, inode, path);
1580         if (err)
1581                 goto cleanup;
1582
1583         err = ext4_ext_dirty(handle, inode, path + depth);
1584
1585 cleanup:
1586         if (npath) {
1587                 ext4_ext_drop_refs(npath);
1588                 kfree(npath);
1589         }
1590         ext4_ext_tree_changed(inode);
1591         ext4_ext_invalidate_cache(inode);
1592         return err;
1593 }
1594
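/*
 * A minimal sketch of what the memmove() calls in ext4_ext_insert_extent()
 * above do, with plain integers standing in for struct ext4_extent and
 * made-up values; the helper below is illustrative only and is never called.
 * Inserting 25 after the nearest smaller entry 20 shifts the tail of the
 * sorted array one slot to the right and writes the new entry into the hole.
 */
static void extent_array_insert_sketch(void)
{
        int arr[5] = { 10, 20, 30, 40, 0 };     /* one spare slot, like a non-full leaf */
        int *nearex = &arr[1];                  /* nearest entry <= the new one (20) */
        int entries_after = 2;                  /* 30 and 40 sit to the right of nearex */

        /* shift 30 and 40 one slot right: { 10, 20, 30, 30, 40 } */
        memmove(nearex + 2, nearex + 1, entries_after * sizeof(int));
        /* write the new entry into the freed slot: { 10, 20, 25, 30, 40 } */
        nearex[1] = 25;
}
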
1595 static void
1596 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1597                         __u32 len, ext4_fsblk_t start, int type)
1598 {
1599         struct ext4_ext_cache *cex;
1600         BUG_ON(len == 0);
1601         cex = &EXT4_I(inode)->i_cached_extent;
1602         cex->ec_type = type;
1603         cex->ec_block = block;
1604         cex->ec_len = len;
1605         cex->ec_start = start;
1606 }
1607
1608 /*
1609  * ext4_ext_put_gap_in_cache:
1610  * calculate boundaries of the gap that the requested block fits into
1611  * and cache this gap
1612  */
1613 static void
1614 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1615                                 ext4_lblk_t block)
1616 {
1617         int depth = ext_depth(inode);
1618         unsigned long len;
1619         ext4_lblk_t lblock;
1620         struct ext4_extent *ex;
1621
1622         ex = path[depth].p_ext;
1623         if (ex == NULL) {
1624                 /* there is no extent yet, so the gap covers the whole file */
1625                 lblock = 0;
1626                 len = EXT_MAX_BLOCK;
1627                 ext_debug("cache gap(whole file):");
1628         } else if (block < le32_to_cpu(ex->ee_block)) {
1629                 lblock = block;
1630                 len = le32_to_cpu(ex->ee_block) - block;
1631                 ext_debug("cache gap(before): %u [%u:%u]",
1632                                 block,
1633                                 le32_to_cpu(ex->ee_block),
1634                                  ext4_ext_get_actual_len(ex));
1635         } else if (block >= le32_to_cpu(ex->ee_block)
1636                         + ext4_ext_get_actual_len(ex)) {
1637                 ext4_lblk_t next;
1638                 lblock = le32_to_cpu(ex->ee_block)
1639                         + ext4_ext_get_actual_len(ex);
1640
1641                 next = ext4_ext_next_allocated_block(path);
1642                 ext_debug("cache gap(after): [%u:%u] %u",
1643                                 le32_to_cpu(ex->ee_block),
1644                                 ext4_ext_get_actual_len(ex),
1645                                 block);
1646                 BUG_ON(next == lblock);
1647                 len = next - lblock;
1648         } else {
1649                 lblock = len = 0;
1650                 BUG();
1651         }
1652
1653         ext_debug(" -> %u:%lu\n", lblock, len);
1654         ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
1655 }
1656
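/*
 * Worked example (made-up numbers) of the "gap after an extent" case above:
 * if the found extent covers logical blocks 50..59 (ee_block 50, actual
 * length 10), the requested block is 70 and the next allocated block is 100,
 * then the cached gap starts at lblock = 50 + 10 = 60 and spans
 * len = 100 - 60 = 40 blocks.
 */
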
1657 static int
1658 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1659                         struct ext4_extent *ex)
1660 {
1661         struct ext4_ext_cache *cex;
1662
1663         cex = &EXT4_I(inode)->i_cached_extent;
1664
1665         /* does the cache hold valid data? */
1666         if (cex->ec_type == EXT4_EXT_CACHE_NO)
1667                 return EXT4_EXT_CACHE_NO;
1668
1669         BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1670                         cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1671         if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1672                 ex->ee_block = cpu_to_le32(cex->ec_block);
1673                 ext4_ext_store_pblock(ex, cex->ec_start);
1674                 ex->ee_len = cpu_to_le16(cex->ec_len);
1675                 ext_debug("%u cached by %u:%u:%llu\n",
1676                                 block,
1677                                 cex->ec_block, cex->ec_len, cex->ec_start);
1678                 return cex->ec_type;
1679         }
1680
1681         /* not in cache */
1682         return EXT4_EXT_CACHE_NO;
1683 }
1684
1685 /*
1686  * ext4_ext_rm_idx:
1687  * removes index from the index block.
1688  * It's used only in the truncate case, thus all requests are for
1689  * the last index in the block.
1690  */
1691 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1692                         struct ext4_ext_path *path)
1693 {
1694         struct buffer_head *bh;
1695         int err;
1696         ext4_fsblk_t leaf;
1697
1698         /* free index block */
1699         path--;
1700         leaf = idx_pblock(path->p_idx);
1701         BUG_ON(path->p_hdr->eh_entries == 0);
1702         err = ext4_ext_get_access(handle, inode, path);
1703         if (err)
1704                 return err;
1705         le16_add_cpu(&path->p_hdr->eh_entries, -1);
1706         err = ext4_ext_dirty(handle, inode, path);
1707         if (err)
1708                 return err;
1709         ext_debug("index is empty, remove it, free block %llu\n", leaf);
1710         bh = sb_find_get_block(inode->i_sb, leaf);
1711         ext4_forget(handle, 1, inode, bh, leaf);
1712         ext4_free_blocks(handle, inode, leaf, 1, 1);
1713         return err;
1714 }
1715
1716 /*
1717  * ext4_ext_calc_credits_for_insert:
1718  * This routine returns the max. credits that the extent tree can consume.
1719  * It should be OK for low-performance paths like ->writepage().
1720  * To allow many writing processes to fit into a single transaction,
1721  * the caller should calculate credits under i_data_sem and
1722  * pass the actual path.
1723  */
1724 int ext4_ext_calc_credits_for_insert(struct inode *inode,
1725                                                 struct ext4_ext_path *path)
1726 {
1727         int depth, needed;
1728
1729         if (path) {
1730                 /* there is probably free space in the leaf */
1731                 depth = ext_depth(inode);
1732                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
1733                                 < le16_to_cpu(path[depth].p_hdr->eh_max))
1734                         return 1;
1735         }
1736
1737         /*
1738          * given 32-bit logical block (4294967296 blocks), max. tree
1739          * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
1740          * Let's also add one more level for imbalance.
1741          */
1742         depth = 5;
1743
1744         /* allocation of new data block(s) */
1745         needed = 2;
1746
1747         /*
1748          * tree can be full, so it would need to grow in depth:
1749          * we need one credit to modify old root, credits for
1750          * new root will be added in split accounting
1751          */
1752         needed += 1;
1753
1754         /*
1755          * Index splits can happen, so we would need to:
1756          *    allocate intermediate indexes (bitmap + group)
1757          *  + change two blocks at each level, except the root (already counted)
1758          */
1759         needed += (depth * 2) + (depth * 2);
1760
1761         /* any allocation modifies superblock */
1762         needed += 1;
1763
1764         return needed;
1765 }
1766
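/*
 * Worked example of the estimate above for the no-path case: with the
 * assumed worst-case depth of 5, the credit count is
 * 2 (new data blocks) + 1 (old root) + 5*2 + 5*2 (index splits)
 * + 1 (superblock) = 24.
 */
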
1767 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
1768                                 struct ext4_extent *ex,
1769                                 ext4_lblk_t from, ext4_lblk_t to)
1770 {
1771         struct buffer_head *bh;
1772         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
1773         int i, metadata = 0;
1774
1775         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1776                 metadata = 1;
1777 #ifdef EXTENTS_STATS
1778         {
1779                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1780                 spin_lock(&sbi->s_ext_stats_lock);
1781                 sbi->s_ext_blocks += ee_len;
1782                 sbi->s_ext_extents++;
1783                 if (ee_len < sbi->s_ext_min)
1784                         sbi->s_ext_min = ee_len;
1785                 if (ee_len > sbi->s_ext_max)
1786                         sbi->s_ext_max = ee_len;
1787                 if (ext_depth(inode) > sbi->s_depth_max)
1788                         sbi->s_depth_max = ext_depth(inode);
1789                 spin_unlock(&sbi->s_ext_stats_lock);
1790         }
1791 #endif
1792         if (from >= le32_to_cpu(ex->ee_block)
1793             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
1794                 /* tail removal */
1795                 ext4_lblk_t num;
1796                 ext4_fsblk_t start;
1797
1798                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
1799                 start = ext_pblock(ex) + ee_len - num;
1800                 ext_debug("free last %u blocks starting %llu\n", num, start);
1801                 for (i = 0; i < num; i++) {
1802                         bh = sb_find_get_block(inode->i_sb, start + i);
1803                         ext4_forget(handle, 0, inode, bh, start + i);
1804                 }
1805                 ext4_free_blocks(handle, inode, start, num, metadata);
1806         } else if (from == le32_to_cpu(ex->ee_block)
1807                    && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
1808                 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
1809                         from, to, le32_to_cpu(ex->ee_block), ee_len);
1810         } else {
1811                 printk(KERN_INFO "strange request: removal(2) "
1812                                 "%u-%u from %u:%u\n",
1813                                 from, to, le32_to_cpu(ex->ee_block), ee_len);
1814         }
1815         return 0;
1816 }
1817
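/*
 * Worked example (made-up numbers) of the tail-removal arithmetic above:
 * for an extent with ee_block 100 and ee_len 10, starting at physical block
 * 5000, a request to remove 105..109 gives num = 100 + 10 - 105 = 5 and
 * start = 5000 + 10 - 5 = 5005, so physical blocks 5005..5009 are freed.
 */
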
1818 static int
1819 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
1820                 struct ext4_ext_path *path, ext4_lblk_t start)
1821 {
1822         int err = 0, correct_index = 0;
1823         int depth = ext_depth(inode), credits;
1824         struct ext4_extent_header *eh;
1825         ext4_lblk_t a, b, block;
1826         unsigned num;
1827         ext4_lblk_t ex_ee_block;
1828         unsigned short ex_ee_len;
1829         unsigned uninitialized = 0;
1830         struct ext4_extent *ex;
1831
1832         /* the header must already have been checked in ext4_ext_remove_space() */
1833         ext_debug("truncate since %u in leaf\n", start);
1834         if (!path[depth].p_hdr)
1835                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
1836         eh = path[depth].p_hdr;
1837         BUG_ON(eh == NULL);
1838
1839         /* find where to start removing */
1840         ex = EXT_LAST_EXTENT(eh);
1841
1842         ex_ee_block = le32_to_cpu(ex->ee_block);
1843         if (ext4_ext_is_uninitialized(ex))
1844                 uninitialized = 1;
1845         ex_ee_len = ext4_ext_get_actual_len(ex);
1846
1847         while (ex >= EXT_FIRST_EXTENT(eh) &&
1848                         ex_ee_block + ex_ee_len > start) {
1849                 ext_debug("remove ext %u:%u\n", ex_ee_block, ex_ee_len);
1850                 path[depth].p_ext = ex;
1851
1852                 a = ex_ee_block > start ? ex_ee_block : start;
1853                 b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
1854                         ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
1855
1856                 ext_debug("  border %u:%u\n", a, b);
1857
1858                 if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
1859                         block = 0;
1860                         num = 0;
1861                         BUG();
1862                 } else if (a != ex_ee_block) {
1863                         /* remove tail of the extent */
1864                         block = ex_ee_block;
1865                         num = a - block;
1866                 } else if (b != ex_ee_block + ex_ee_len - 1) {
1867                         /* remove head of the extent */
1868                         block = a;
1869                         num = b - a;
1870                         /* there is no "make a hole" API yet */
1871                         BUG();
1872                 } else {
1873                         /* remove whole extent: excellent! */
1874                         block = ex_ee_block;
1875                         num = 0;
1876                         BUG_ON(a != ex_ee_block);
1877                         BUG_ON(b != ex_ee_block + ex_ee_len - 1);
1878                 }
1879
1880                 /* at present, extent can't cross block group: */
1881                 /* leaf + bitmap + group desc + sb + inode */
1882                 credits = 5;
1883                 if (ex == EXT_FIRST_EXTENT(eh)) {
1884                         correct_index = 1;
1885                         credits += (ext_depth(inode)) + 1;
1886                 }
1887 #ifdef CONFIG_QUOTA
1888                 credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
1889 #endif
1890
1891                 handle = ext4_ext_journal_restart(handle, credits);
1892                 if (IS_ERR(handle)) {
1893                         err = PTR_ERR(handle);
1894                         goto out;
1895                 }
1896
1897                 err = ext4_ext_get_access(handle, inode, path + depth);
1898                 if (err)
1899                         goto out;
1900
1901                 err = ext4_remove_blocks(handle, inode, ex, a, b);
1902                 if (err)
1903                         goto out;
1904
1905                 if (num == 0) {
1906                         /* this extent is removed; mark slot entirely unused */
1907                         ext4_ext_store_pblock(ex, 0);
1908                         le16_add_cpu(&eh->eh_entries, -1);
1909                 }
1910
1911                 ex->ee_block = cpu_to_le32(block);
1912                 ex->ee_len = cpu_to_le16(num);
1913                 /*
1914                  * Do not mark uninitialized if all the blocks in the
1915                  * extent have been removed.
1916                  */
1917                 if (uninitialized && num)
1918                         ext4_ext_mark_uninitialized(ex);
1919
1920                 err = ext4_ext_dirty(handle, inode, path + depth);
1921                 if (err)
1922                         goto out;
1923
1924                 ext_debug("new extent: %u:%u:%llu\n", block, num,
1925                                 ext_pblock(ex));
1926                 ex--;
1927                 ex_ee_block = le32_to_cpu(ex->ee_block);
1928                 ex_ee_len = ext4_ext_get_actual_len(ex);
1929         }
1930
1931         if (correct_index && eh->eh_entries)
1932                 err = ext4_ext_correct_indexes(handle, inode, path);
1933
1934         /* if this leaf is free, then we should
1935          * remove it from index block above */
1936         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
1937                 err = ext4_ext_rm_idx(handle, inode, path + depth);
1938
1939 out:
1940         return err;
1941 }
1942
1943 /*
1944  * ext4_ext_more_to_rm:
1945  * returns 1 if the current index has to be freed (even partially)
1946  */
1947 static int
1948 ext4_ext_more_to_rm(struct ext4_ext_path *path)
1949 {
1950         BUG_ON(path->p_idx == NULL);
1951
1952         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1953                 return 0;
1954
1955         /*
1956          * if truncation on a deeper level happened, it wasn't partial,
1957          * so we have to consider the current index for truncation
1958          */
1959         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
1960                 return 0;
1961         return 1;
1962 }
1963
1964 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
1965 {
1966         struct super_block *sb = inode->i_sb;
1967         int depth = ext_depth(inode);
1968         struct ext4_ext_path *path;
1969         handle_t *handle;
1970         int i = 0, err = 0;
1971
1972         ext_debug("truncate since %u\n", start);
1973
1974         /* the first extent we free will probably be the last one in its block */
1975         handle = ext4_journal_start(inode, depth + 1);
1976         if (IS_ERR(handle))
1977                 return PTR_ERR(handle);
1978
1979         ext4_ext_invalidate_cache(inode);
1980
1981         /*
1982          * We start scanning from right side, freeing all the blocks
1983          * after i_size and walking into the tree depth-wise.
1984          */
1985         path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
1986         if (path == NULL) {
1987                 ext4_journal_stop(handle);
1988                 return -ENOMEM;
1989         }
1990         path[0].p_hdr = ext_inode_hdr(inode);
1991         if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
1992                 err = -EIO;
1993                 goto out;
1994         }
1995         path[0].p_depth = depth;
1996
1997         while (i >= 0 && err == 0) {
1998                 if (i == depth) {
1999                         /* this is leaf block */
2000                         err = ext4_ext_rm_leaf(handle, inode, path, start);
2001                         /* root level has p_bh == NULL, brelse() eats this */
2002                         brelse(path[i].p_bh);
2003                         path[i].p_bh = NULL;
2004                         i--;
2005                         continue;
2006                 }
2007
2008                 /* this is index block */
2009                 if (!path[i].p_hdr) {
2010                         ext_debug("initialize header\n");
2011                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2012                 }
2013
2014                 if (!path[i].p_idx) {
2015                         /* this level hasn't been touched yet */
2016                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2017                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2018                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2019                                   path[i].p_hdr,
2020                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2021                 } else {
2022                         /* we were already here, look at the next index */
2023                         path[i].p_idx--;
2024                 }
2025
2026                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2027                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2028                                 path[i].p_idx);
2029                 if (ext4_ext_more_to_rm(path + i)) {
2030                         struct buffer_head *bh;
2031                         /* go to the next level */
2032                         ext_debug("move to level %d (block %llu)\n",
2033                                   i + 1, idx_pblock(path[i].p_idx));
2034                         memset(path + i + 1, 0, sizeof(*path));
2035                         bh = sb_bread(sb, idx_pblock(path[i].p_idx));
2036                         if (!bh) {
2037                                 /* should we reset i_size? */
2038                                 err = -EIO;
2039                                 break;
2040                         }
2041                         if (WARN_ON(i + 1 > depth)) {
2042                                 err = -EIO;
2043                                 break;
2044                         }
2045                         if (ext4_ext_check_header(inode, ext_block_hdr(bh),
2046                                                         depth - i - 1)) {
2047                                 err = -EIO;
2048                                 break;
2049                         }
2050                         path[i + 1].p_bh = bh;
2051
2052                         /* save actual number of indexes since this
2053                          * number is changed at the next iteration */
2054                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2055                         i++;
2056                 } else {
2057                         /* we finished processing this index, go up */
2058                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2059                                 /* index is empty, remove it;
2060                                  * the handle must already have been prepared
2061                                  * when the leaf was truncated */
2062                                 err = ext4_ext_rm_idx(handle, inode, path + i);
2063                         }
2064                         /* root level has p_bh == NULL, brelse() eats this */
2065                         brelse(path[i].p_bh);
2066                         path[i].p_bh = NULL;
2067                         i--;
2068                         ext_debug("return to level %d\n", i);
2069                 }
2070         }
2071
2072         /* TODO: flexible tree reduction should be here */
2073         if (path->p_hdr->eh_entries == 0) {
2074                 /*
2075                  * truncation to zero freed the whole tree,
2076                  * so we need to correct eh_depth
2077                  */
2078                 err = ext4_ext_get_access(handle, inode, path);
2079                 if (err == 0) {
2080                         ext_inode_hdr(inode)->eh_depth = 0;
2081                         ext_inode_hdr(inode)->eh_max =
2082                                 cpu_to_le16(ext4_ext_space_root(inode));
2083                         err = ext4_ext_dirty(handle, inode, path);
2084                 }
2085         }
2086 out:
2087         ext4_ext_tree_changed(inode);
2088         ext4_ext_drop_refs(path);
2089         kfree(path);
2090         ext4_journal_stop(handle);
2091
2092         return err;
2093 }
2094
2095 /*
2096  * called at mount time
2097  */
2098 void ext4_ext_init(struct super_block *sb)
2099 {
2100         /*
2101          * possible initialization would be here
2102          */
2103
2104         if (test_opt(sb, EXTENTS)) {
2105                 printk("EXT4-fs: file extents enabled");
2106 #ifdef AGGRESSIVE_TEST
2107                 printk(", aggressive tests");
2108 #endif
2109 #ifdef CHECK_BINSEARCH
2110                 printk(", check binsearch");
2111 #endif
2112 #ifdef EXTENTS_STATS
2113                 printk(", stats");
2114 #endif
2115                 printk("\n");
2116 #ifdef EXTENTS_STATS
2117                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2118                 EXT4_SB(sb)->s_ext_min = 1 << 30;
2119                 EXT4_SB(sb)->s_ext_max = 0;
2120 #endif
2121         }
2122 }
2123
2124 /*
2125  * called at umount time
2126  */
2127 void ext4_ext_release(struct super_block *sb)
2128 {
2129         if (!test_opt(sb, EXTENTS))
2130                 return;
2131
2132 #ifdef EXTENTS_STATS
2133         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2134                 struct ext4_sb_info *sbi = EXT4_SB(sb);
2135                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2136                         sbi->s_ext_blocks, sbi->s_ext_extents,
2137                         sbi->s_ext_blocks / sbi->s_ext_extents);
2138                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2139                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2140         }
2141 #endif
2142 }
2143
2144 static void bi_complete(struct bio *bio, int error)
2145 {
2146         complete((struct completion *)bio->bi_private);
2147 }
2148
2149 /* FIXME!! we need to try to merge to left or right after zero-out  */
2150 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2151 {
2152         int ret = -EIO;
2153         struct bio *bio;
2154         int blkbits, blocksize;
2155         sector_t ee_pblock;
2156         struct completion event;
2157         unsigned int ee_len, len, done, offset;
2158
2159
2160         blkbits   = inode->i_blkbits;
2161         blocksize = inode->i_sb->s_blocksize;
2162         ee_len    = ext4_ext_get_actual_len(ex);
2163         ee_pblock = ext_pblock(ex);
2164
2165         /* convert ee_pblock to 512 byte sectors */
2166         ee_pblock = ee_pblock << (blkbits - 9);
2167
2168         while (ee_len > 0) {
2169
2170                 if (ee_len > BIO_MAX_PAGES)
2171                         len = BIO_MAX_PAGES;
2172                 else
2173                         len = ee_len;
2174
2175                 bio = bio_alloc(GFP_NOIO, len);
2176                 if (!bio)
2177                         return -ENOMEM;
2178                 bio->bi_sector = ee_pblock;
2179                 bio->bi_bdev   = inode->i_sb->s_bdev;
2180
2181                 done = 0;
2182                 offset = 0;
2183                 while (done < len) {
2184                         ret = bio_add_page(bio, ZERO_PAGE(0),
2185                                                         blocksize, offset);
2186                         if (ret != blocksize) {
2187                                 /*
2188                                  * We can't add any more pages because of
2189                                  * hardware limitations.  Start a new bio.
2190                                  */
2191                                 break;
2192                         }
2193                         done++;
2194                         offset += blocksize;
2195                         if (offset >= PAGE_CACHE_SIZE)
2196                                 offset = 0;
2197                 }
2198
2199                 init_completion(&event);
2200                 bio->bi_private = &event;
2201                 bio->bi_end_io = bi_complete;
2202                 submit_bio(WRITE, bio);
2203                 wait_for_completion(&event);
2204
2205                 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
2206                         ret = 0;
2207                 else {
2208                         ret = -EIO;
2209                         break;
2210                 }
2211                 bio_put(bio);
2212                 ee_len    -= done;
2213                 ee_pblock += done  << (blkbits - 9);
2214         }
2215         return ret;
2216 }
2217
2218 #define EXT4_EXT_ZERO_LEN 7
2219
2220 /*
2221  * This function is called by ext4_ext_get_blocks() if someone tries to write
2222  * to an uninitialized extent. It may result in splitting the uninitialized
2223  * extent into multiple extents (up to three - one initialized and two
2224  * uninitialized).
2225  * There are three possibilities:
2226  *   a> There is no split required: Entire extent should be initialized
2227  *   b> Splits in two extents: Write is happening at either end of the extent
2228  *   c> Splits in three extents: Somone is writing in middle of the extent
2229  *   c> Splits in three extents: Someone is writing in the middle of the extent
2230 static int ext4_ext_convert_to_initialized(handle_t *handle,
2231                                                 struct inode *inode,
2232                                                 struct ext4_ext_path *path,
2233                                                 ext4_lblk_t iblock,
2234                                                 unsigned long max_blocks)
2235 {
2236         struct ext4_extent *ex, newex, orig_ex;
2237         struct ext4_extent *ex1 = NULL;
2238         struct ext4_extent *ex2 = NULL;
2239         struct ext4_extent *ex3 = NULL;
2240         struct ext4_extent_header *eh;
2241         ext4_lblk_t ee_block;
2242         unsigned int allocated, ee_len, depth;
2243         ext4_fsblk_t newblock;
2244         int err = 0;
2245         int ret = 0;
2246
2247         depth = ext_depth(inode);
2248         eh = path[depth].p_hdr;
2249         ex = path[depth].p_ext;
2250         ee_block = le32_to_cpu(ex->ee_block);
2251         ee_len = ext4_ext_get_actual_len(ex);
2252         allocated = ee_len - (iblock - ee_block);
2253         newblock = iblock - ee_block + ext_pblock(ex);
2254         ex2 = ex;
2255         orig_ex.ee_block = ex->ee_block;
2256         orig_ex.ee_len   = cpu_to_le16(ee_len);
2257         ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2258
2259         err = ext4_ext_get_access(handle, inode, path + depth);
2260         if (err)
2261                 goto out;
2262         /* If the extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2263         if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
2264                 err =  ext4_ext_zeroout(inode, &orig_ex);
2265                 if (err)
2266                         goto fix_extent_len;
2267                 /* update the extent length and mark as initialized */
2268                 ex->ee_block = orig_ex.ee_block;
2269                 ex->ee_len   = orig_ex.ee_len;
2270                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2271                 ext4_ext_dirty(handle, inode, path + depth);
2272                 /* zeroed the full extent */
2273                 return allocated;
2274         }
2275
2276         /* ex1: ee_block to iblock - 1 : uninitialized */
2277         if (iblock > ee_block) {
2278                 ex1 = ex;
2279                 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2280                 ext4_ext_mark_uninitialized(ex1);
2281                 ex2 = &newex;
2282         }
2283         /*
2284          * for sanity, update the length of the ex2 extent before
2285          * we insert ex3, if ex1 is NULL. This is to avoid temporary
2286          * overlap of blocks.
2287          */
2288         if (!ex1 && allocated > max_blocks)
2289                 ex2->ee_len = cpu_to_le16(max_blocks);
2290         /* ex3: to ee_block + ee_len : uninitialized */
2291         if (allocated > max_blocks) {
2292                 unsigned int newdepth;
2293                 /* If the extent has less than EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2294                 if (allocated <= EXT4_EXT_ZERO_LEN) {
2295                         /* Mark first half uninitialized.
2296                          * Mark second half initialized and zero out the
2297                          * initialized extent
2298                          */
2299                         ex->ee_block = orig_ex.ee_block;
2300                         ex->ee_len   = cpu_to_le16(ee_len - allocated);
2301                         ext4_ext_mark_uninitialized(ex);
2302                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2303                         ext4_ext_dirty(handle, inode, path + depth);
2304
2305                         ex3 = &newex;
2306                         ex3->ee_block = cpu_to_le32(iblock);
2307                         ext4_ext_store_pblock(ex3, newblock);
2308                         ex3->ee_len = cpu_to_le16(allocated);
2309                         err = ext4_ext_insert_extent(handle, inode, path, ex3);
2310                         if (err == -ENOSPC) {
2311                                 err =  ext4_ext_zeroout(inode, &orig_ex);
2312                                 if (err)
2313                                         goto fix_extent_len;
2314                                 ex->ee_block = orig_ex.ee_block;
2315                                 ex->ee_len   = orig_ex.ee_len;
2316                                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2317                                 ext4_ext_dirty(handle, inode, path + depth);
2318                                 /* zeroed the full extent */
2319                                 return allocated;
2320
2321                         } else if (err)
2322                                 goto fix_extent_len;
2323
2324                         /*
2325                          * We need to zero out the second half because
2326                          * a fallocate request can update the file size, and
2327                          * converting the second half to an initialized extent
2328                          * means that we could leak some junk data to user
2329                          * space.
2330                          */
2331                         err =  ext4_ext_zeroout(inode, ex3);
2332                         if (err) {
2333                                 /*
2334                                  * We should actually mark the
2335                                  * second half as uninitialized and return the error;
2336                                  * the insert may have changed the extent
2337                                  */
2338                                 depth = ext_depth(inode);
2339                                 ext4_ext_drop_refs(path);
2340                                 path = ext4_ext_find_extent(inode,
2341                                                                 iblock, path);
2342                                 if (IS_ERR(path)) {
2343                                         err = PTR_ERR(path);
2344                                         return err;
2345                                 }
2346                                 ex = path[depth].p_ext;
2347                                 err = ext4_ext_get_access(handle, inode,
2348                                                                 path + depth);
2349                                 if (err)
2350                                         return err;
2351                                 ext4_ext_mark_uninitialized(ex);
2352                                 ext4_ext_dirty(handle, inode, path + depth);
2353                                 return err;
2354                         }
2355
2356                         /* zeroed the second half */
2357                         return allocated;
2358                 }
2359                 ex3 = &newex;
2360                 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2361                 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2362                 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2363                 ext4_ext_mark_uninitialized(ex3);
2364                 err = ext4_ext_insert_extent(handle, inode, path, ex3);
2365                 if (err == -ENOSPC) {
2366                         err =  ext4_ext_zeroout(inode, &orig_ex);
2367                         if (err)
2368                                 goto fix_extent_len;
2369                         /* update the extent length and mark as initialized */
2370                         ex->ee_block = orig_ex.ee_block;
2371                         ex->ee_len   = orig_ex.ee_len;
2372                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2373                         ext4_ext_dirty(handle, inode, path + depth);
2374                         /* zeroed the full extent */
2375                         return allocated;
2376
2377                 } else if (err)
2378                         goto fix_extent_len;
2379                 /*
2380                  * The depth, and hence eh & ex might change
2381                  * as part of the insert above.
2382                  */
2383                 newdepth = ext_depth(inode);
2384                 /*
2385                  * update the extent length after a successful insert of the
2386                  * split extent
2387                  */
2388                 orig_ex.ee_len = cpu_to_le16(ee_len -
2389                                                 ext4_ext_get_actual_len(ex3));
2390                 if (newdepth != depth) {
2391                         depth = newdepth;
2392                         ext4_ext_drop_refs(path);
2393                         path = ext4_ext_find_extent(inode, iblock, path);
2394                         if (IS_ERR(path)) {
2395                                 err = PTR_ERR(path);
2396                                 goto out;
2397                         }
2398                         eh = path[depth].p_hdr;
2399                         ex = path[depth].p_ext;
2400                         if (ex2 != &newex)
2401                                 ex2 = ex;
2402
2403                         err = ext4_ext_get_access(handle, inode, path + depth);
2404                         if (err)
2405                                 goto out;
2406                 }
2407                 allocated = max_blocks;
2408
2409                 /* If the extent has less than EXT4_EXT_ZERO_LEN blocks and we are trying
2410                  * to insert an extent in the middle, zero it out directly;
2411                  * otherwise give the extent a chance to merge to the left
2412                  */
2413                 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2414                                                         iblock != ee_block) {
2415                         err =  ext4_ext_zeroout(inode, &orig_ex);
2416                         if (err)
2417                                 goto fix_extent_len;
2418                         /* update the extent length and mark as initialized */
2419                         ex->ee_block = orig_ex.ee_block;
2420                         ex->ee_len   = orig_ex.ee_len;
2421                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2422                         ext4_ext_dirty(handle, inode, path + depth);
2423                         /* zero out the first half */
2424                         return allocated;
2425                 }
2426         }
2427         /*
2428          * If there was a change of depth as part of the
2429          * insertion of ex3 above, we need to update the length
2430          * of the ex1 extent again here
2431          */
2432         if (ex1 && ex1 != ex) {
2433                 ex1 = ex;
2434                 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2435                 ext4_ext_mark_uninitialized(ex1);
2436                 ex2 = &newex;
2437         }
2438         /* ex2: iblock to iblock + max_blocks - 1 : initialized */
2439         ex2->ee_block = cpu_to_le32(iblock);
2440         ext4_ext_store_pblock(ex2, newblock);
2441         ex2->ee_len = cpu_to_le16(allocated);
2442         if (ex2 != ex)
2443                 goto insert;
2444         /*
2445          * New (initialized) extent starts from the first block
2446          * in the current extent, i.e., ex2 == ex.
2447          * We have to see if it can be merged with the extent
2448          * on the left.
2449          */
2450         if (ex2 > EXT_FIRST_EXTENT(eh)) {
2451                 /*
2452                  * To merge left, pass "ex2 - 1" to try_to_merge(),
2453                  * since it merges towards right _only_.
2454                  */
2455                 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2456                 if (ret) {
2457                         err = ext4_ext_correct_indexes(handle, inode, path);
2458                         if (err)
2459                                 goto out;
2460                         depth = ext_depth(inode);
2461                         ex2--;
2462                 }
2463         }
2464         /*
2465          * Try to merge towards the right. This might be required
2466          * only when the whole extent is being written to,
2467          * i.e. ex2 == ex and ex3 == NULL.
2468          */
2469         if (!ex3) {
2470                 ret = ext4_ext_try_to_merge(inode, path, ex2);
2471                 if (ret) {
2472                         err = ext4_ext_correct_indexes(handle, inode, path);
2473                         if (err)
2474                                 goto out;
2475                 }
2476         }
2477         /* Mark modified extent as dirty */
2478         err = ext4_ext_dirty(handle, inode, path + depth);
2479         goto out;
2480 insert:
2481         err = ext4_ext_insert_extent(handle, inode, path, &newex);
2482         if (err == -ENOSPC) {
2483                 err =  ext4_ext_zeroout(inode, &orig_ex);
2484                 if (err)
2485                         goto fix_extent_len;
2486                 /* update the extent length and mark as initialized */
2487                 ex->ee_block = orig_ex.ee_block;
2488                 ex->ee_len   = orig_ex.ee_len;
2489                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2490                 ext4_ext_dirty(handle, inode, path + depth);
2491                 /* zero out the first half */
2492                 return allocated;
2493         } else if (err)
2494                 goto fix_extent_len;
2495 out:
2496         return err ? err : allocated;
2497
2498 fix_extent_len:
2499         ex->ee_block = orig_ex.ee_block;
2500         ex->ee_len   = orig_ex.ee_len;
2501         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2502         ext4_ext_mark_uninitialized(ex);
2503         ext4_ext_dirty(handle, inode, path + depth);
2504         return err;
2505 }
2506
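/*
 * A simplified sketch of the three-extent split (case c> above), using
 * plain integers and made-up numbers; the helper is illustrative only and
 * is never called.  Writing max_blocks = 4 blocks at iblock = 103 into an
 * uninitialized extent covering 100..109 leaves 100..102 and 107..109
 * uninitialized and initializes 103..106.
 */
static unsigned example_three_way_split(void)
{
        unsigned ee_block = 100, ee_len = 10;   /* original uninitialized extent: 100..109 */
        unsigned iblock = 103, max_blocks = 4;  /* region being written: 103..106 */

        unsigned ex1_len = iblock - ee_block;                           /* 3: 100..102, stays uninitialized */
        unsigned ex2_len = max_blocks;                                  /* 4: 103..106, becomes initialized */
        unsigned ex3_len = (ee_block + ee_len) - (iblock + max_blocks); /* 3: 107..109, stays uninitialized */

        /* sanity check: the three pieces add back up to the original length */
        return ex1_len + ex2_len + ex3_len;     /* == ee_len == 10 */
}
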
2507 /*
2508  * Block allocation/map/preallocation routine for extent-based files
2509  *
2510  *
2511  * Needs to be called with
2512  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
2513  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem).
2514  *
2515  * return > 0, number of blocks already mapped/allocated
2516  *          if create == 0 and these are pre-allocated blocks
2517  *              buffer head is unmapped
2518  *          otherwise blocks are mapped
2519  *
2520  * return = 0, if plain look up failed (blocks have not been allocated)
2521  *          buffer head is unmapped
2522  *
2523  * return < 0, error case.
2524  */
2525 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2526                         ext4_lblk_t iblock,
2527                         unsigned long max_blocks, struct buffer_head *bh_result,
2528                         int create, int extend_disksize)
2529 {
2530         struct ext4_ext_path *path = NULL;
2531         struct ext4_extent_header *eh;
2532         struct ext4_extent newex, *ex;
2533         ext4_fsblk_t goal, newblock;
2534         int err = 0, depth, ret;
2535         unsigned long allocated = 0;
2536         struct ext4_allocation_request ar;
2537
2538         __clear_bit(BH_New, &bh_result->b_state);
2539         ext_debug("blocks %u/%lu requested for inode %lu\n",
2540                         iblock, max_blocks, inode->i_ino);
2541
2542         /* check in cache */
2543         goal = ext4_ext_in_cache(inode, iblock, &newex);
2544         if (goal) {
2545                 if (goal == EXT4_EXT_CACHE_GAP) {
2546                         if (!create) {
2547                                 /*
2548                                  * block isn't allocated yet and
2549                                  * user doesn't want to allocate it
2550                                  */
2551                                 goto out2;
2552                         }
2553                         /* we should allocate requested block */
2554                 } else if (goal == EXT4_EXT_CACHE_EXTENT) {
2555                         /* block is already allocated */
2556                         newblock = iblock
2557                                    - le32_to_cpu(newex.ee_block)
2558                                    + ext_pblock(&newex);
2559                         /* number of remaining blocks in the extent */
2560                         allocated = ext4_ext_get_actual_len(&newex) -
2561                                         (iblock - le32_to_cpu(newex.ee_block));
2562                         goto out;
2563                 } else {
2564                         BUG();
2565                 }
2566         }
2567
2568         /* find extent for this block */
2569         path = ext4_ext_find_extent(inode, iblock, NULL);
2570         if (IS_ERR(path)) {
2571                 err = PTR_ERR(path);
2572                 path = NULL;
2573                 goto out2;
2574         }
2575
2576         depth = ext_depth(inode);
2577
2578         /*
2579          * consistent leaf must not be empty;
2580          * this situation is possible, though, _during_ tree modification;
2581          * this is why assert can't be put in ext4_ext_find_extent()
2582          */
2583         BUG_ON(path[depth].p_ext == NULL && depth != 0);
2584         eh = path[depth].p_hdr;
2585
2586         ex = path[depth].p_ext;
2587         if (ex) {
2588                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
2589                 ext4_fsblk_t ee_start = ext_pblock(ex);
2590                 unsigned short ee_len;
2591
2592                 /*
2593                  * Uninitialized extents are treated as holes, except that
2594                  * we split out initialized portions during a write.
2595                  */
2596                 ee_len = ext4_ext_get_actual_len(ex);
2597                 /* if found extent covers block, simply return it */
2598                 if (iblock >= ee_block && iblock < ee_block + ee_len) {
2599                         newblock = iblock - ee_block + ee_start;
2600                         /* number of remaining blocks in the extent */
2601                         allocated = ee_len - (iblock - ee_block);
2602                         ext_debug("%u fit into %u:%d -> %llu\n", iblock,
2603                                         ee_block, ee_len, newblock);
2604
2605                         /* Do not put uninitialized extent in the cache */
2606                         if (!ext4_ext_is_uninitialized(ex)) {
2607                                 ext4_ext_put_in_cache(inode, ee_block,
2608                                                         ee_len, ee_start,
2609                                                         EXT4_EXT_CACHE_EXTENT);
2610                                 goto out;
2611                         }
2612                         if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2613                                 goto out;
2614                         if (!create) {
2615                                 /*
2616                                  * We have blocks reserved already.  We
2617                                  * return allocated blocks so that delalloc
2618                                  * won't do block reservation for us.  But
2619                                  * the buffer head will be unmapped so that
2620                                  * a read from the block returns 0s.
2621                                  */
2622                                 if (allocated > max_blocks)
2623                                         allocated = max_blocks;
2624                                 /* mark the buffer unwritten */
2625                                 __set_bit(BH_Unwritten, &bh_result->b_state);
2626                                 goto out2;
2627                         }
2628
2629                         ret = ext4_ext_convert_to_initialized(handle, inode,
2630                                                                 path, iblock,
2631                                                                 max_blocks);
2632                         if (ret <= 0) {
2633                                 err = ret;
2634                                 goto out2;
2635                         } else
2636                                 allocated = ret;
2637                         goto outnew;
2638                 }
2639         }
2640
2641         /*
2642          * requested block isn't allocated yet;
2643          * we cannot create the block if the create flag is zero
2644          */
2645         if (!create) {
2646                 /*
2647                  * put just found gap into cache to speed up
2648                  * subsequent requests
2649                  */
2650                 ext4_ext_put_gap_in_cache(inode, path, iblock);
2651                 goto out2;
2652         }
2653         /*
2654          * Okay, we need to do block allocation.  Lazily initialize the block
2655          * allocation info here if necessary.
2656          */
2657         if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
2658                 ext4_init_block_alloc_info(inode);
2659
2660         /* find neighbour allocated blocks */
2661         ar.lleft = iblock;
2662         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
2663         if (err)
2664                 goto out2;
2665         ar.lright = iblock;
2666         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
2667         if (err)
2668                 goto out2;
2669
2670         /*
2671          * See if request is beyond maximum number of blocks we can have in
2672          * a single extent. For an initialized extent this limit is
2673          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2674          * EXT_UNINIT_MAX_LEN.
2675          */
2676         if (max_blocks > EXT_INIT_MAX_LEN &&
2677             create != EXT4_CREATE_UNINITIALIZED_EXT)
2678                 max_blocks = EXT_INIT_MAX_LEN;
2679         else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2680                  create == EXT4_CREATE_UNINITIALIZED_EXT)
2681                 max_blocks = EXT_UNINIT_MAX_LEN;
2682
2683         /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2684         newex.ee_block = cpu_to_le32(iblock);
2685         newex.ee_len = cpu_to_le16(max_blocks);
2686         err = ext4_ext_check_overlap(inode, &newex, path);
2687         if (err)
2688                 allocated = ext4_ext_get_actual_len(&newex);
2689         else
2690                 allocated = max_blocks;
2691
2692         /* allocate new block */
2693         ar.inode = inode;
2694         ar.goal = ext4_ext_find_goal(inode, path, iblock);
2695         ar.logical = iblock;
2696         ar.len = allocated;
2697         if (S_ISREG(inode->i_mode))
2698                 ar.flags = EXT4_MB_HINT_DATA;
2699         else
2700                 /* disable in-core preallocation for non-regular files */
2701                 ar.flags = 0;
2702         newblock = ext4_mb_new_blocks(handle, &ar, &err);
2703         if (!newblock)
2704                 goto out2;
2705         ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
2706                         goal, newblock, allocated);
2707
2708         /* try to insert new extent into found leaf and return */
2709         ext4_ext_store_pblock(&newex, newblock);
2710         newex.ee_len = cpu_to_le16(ar.len);
2711         if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
2712                 ext4_ext_mark_uninitialized(&newex);
2713         err = ext4_ext_insert_extent(handle, inode, path, &newex);
2714         if (err) {
2715                 /* free data blocks we just allocated */
2716                 /* not a good idea to call discard here directly,
2717                  * but otherwise we'd need to call it on every free() */
2718                 ext4_mb_discard_inode_preallocations(inode);
2719                 ext4_free_blocks(handle, inode, ext_pblock(&newex),
2720                                         ext4_ext_get_actual_len(&newex), 0);
2721                 goto out2;
2722         }
2723
2724         /* previous routine could use block we allocated */
2725         newblock = ext_pblock(&newex);
2726         allocated = ext4_ext_get_actual_len(&newex);
2727 outnew:
2728         if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
2729                 EXT4_I(inode)->i_disksize = inode->i_size;
2730
2731         __set_bit(BH_New, &bh_result->b_state);
2732
2733         /* Cache only when it is _not_ an uninitialized extent */
2734         if (create != EXT4_CREATE_UNINITIALIZED_EXT)
2735                 ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2736                                                 EXT4_EXT_CACHE_EXTENT);
2737 out:
2738         if (allocated > max_blocks)
2739                 allocated = max_blocks;
2740         ext4_ext_show_leaf(inode, path);
2741         __set_bit(BH_Mapped, &bh_result->b_state);
2742         bh_result->b_bdev = inode->i_sb->s_bdev;
2743         bh_result->b_blocknr = newblock;
2744 out2:
2745         if (path) {
2746                 ext4_ext_drop_refs(path);
2747                 kfree(path);
2748         }
2749         return err ? err : allocated;
2750 }
2751
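/*
 * Worked example (made-up numbers) of the cache-hit mapping near the top of
 * ext4_ext_get_blocks(); the helper is illustrative only and is never called.
 * With a cached extent starting at logical block 200, 16 blocks long and
 * backed by physical block 9000, a request for block 205 maps to physical
 * block 9005 with 16 - 5 = 11 blocks remaining in the extent.
 */
static unsigned long long example_cache_hit_mapping(unsigned iblock)
{
        unsigned ee_block = 200;                /* cached logical start */
        unsigned long long ee_start = 9000;     /* cached physical start */

        /* the real code also computes ee_len - (iblock - ee_block) remaining blocks */
        return iblock - ee_block + ee_start;    /* 9005 for iblock == 205 */
}
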
2752 void ext4_ext_truncate(struct inode *inode, struct page *page)
2753 {
2754         struct address_space *mapping = inode->i_mapping;
2755         struct super_block *sb = inode->i_sb;
2756         ext4_lblk_t last_block;
2757         handle_t *handle;
2758         int err = 0;
2759
2760         /*
2761          * the first extent we free will probably be the last one in its block
2762          */
2763         err = ext4_writepage_trans_blocks(inode) + 3;
2764         handle = ext4_journal_start(inode, err);
2765         if (IS_ERR(handle)) {
2766                 if (page) {
2767                         clear_highpage(page);
2768                         flush_dcache_page(page);
2769                         unlock_page(page);
2770                         page_cache_release(page);
2771                 }
2772                 return;
2773         }
2774
2775         if (page)
2776                 ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2777
2778         down_write(&EXT4_I(inode)->i_data_sem);
2779         ext4_ext_invalidate_cache(inode);
2780
2781         ext4_mb_discard_inode_preallocations(inode);
2782
2783         /*
2784          * TODO: optimization is possible here.
2785          * Probably we need not scan at all,
2786          * because page truncation is enough.
2787          */
2788         if (ext4_orphan_add(handle, inode))
2789                 goto out_stop;
2790
2791         /* we have to know where to truncate from in crash case */
2792         EXT4_I(inode)->i_disksize = inode->i_size;
2793         ext4_mark_inode_dirty(handle, inode);
2794
2795         last_block = (inode->i_size + sb->s_blocksize - 1)
2796                         >> EXT4_BLOCK_SIZE_BITS(sb);
2797         err = ext4_ext_remove_space(inode, last_block);
2798
2799         /* In a multi-transaction truncate, we only make the final
2800          * transaction synchronous.
2801          */
2802         if (IS_SYNC(inode))
2803                 handle->h_sync = 1;
2804
2805 out_stop:
2806         /*
2807          * If this was a simple ftruncate() and the file will remain alive,
2808          * then we need to clear up the orphan record which we created above.
2809          * However, if this was a real unlink then we were called by
2810          * ext4_delete_inode(), and we allow that function to clean up the
2811          * orphan info for us.
2812          */
2813         if (inode->i_nlink)
2814                 ext4_orphan_del(handle, inode);
2815
2816         up_write(&EXT4_I(inode)->i_data_sem);
2817         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2818         ext4_mark_inode_dirty(handle, inode);
2819         ext4_journal_stop(handle);
2820 }
2821
2822 /*
2823  * ext4_ext_writepage_trans_blocks:
2824  * calculate the maximum number of blocks we could modify
2825  * in order to allocate a new block for an inode
2826  */
2827 int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
2828 {
2829         int needed;
2830
2831         needed = ext4_ext_calc_credits_for_insert(inode, NULL);
2832
2833         /* caller wants to allocate num blocks; the estimate includes the sb, counted only once */
2834         needed = needed * num - (num - 1);
2835
2836 #ifdef CONFIG_QUOTA
2837         needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
2838 #endif
2839
2840         return needed;
2841 }
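/*
 * Worked example for the formula above (the per-insert figure is
 * illustrative, not a constant taken from this file): if
 * ext4_ext_calc_credits_for_insert() returned 12 credits, including the
 * superblock buffer, then for num = 4 blocks we get 12 * 4 - 3 = 45
 * credits, so the superblock is counted only once; with CONFIG_QUOTA a
 * further 2 * EXT4_QUOTA_TRANS_BLOCKS(sb) credits are added.
 */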
2842
2843 static void ext4_falloc_update_inode(struct inode *inode,
2844                                 int mode, loff_t new_size, int update_ctime)
2845 {
2846         struct timespec now;
2847
2848         if (update_ctime) {
2849                 now = current_fs_time(inode->i_sb);
2850                 if (!timespec_equal(&inode->i_ctime, &now))
2851                         inode->i_ctime = now;
2852         }
2853         /*
2854          * Update only when preallocation was requested beyond
2855          * the file size.
2856          */
2857         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2858                                 new_size > i_size_read(inode)) {
2859                 i_size_write(inode, new_size);
2860                 EXT4_I(inode)->i_disksize = new_size;
2861         }
2862
2863 }
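/*
 * For example (sizes illustrative): with i_size at 1 MB and a
 * preallocation reaching new_size = 4 MB, the plain mode updates both
 * i_size and i_disksize to 4 MB, while FALLOC_FL_KEEP_SIZE leaves them
 * at 1 MB even though ext4_fallocate() has already allocated the blocks
 * as uninitialized extents.
 */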
2864
2865 /*
2866  * preallocate space for a file. This implements ext4's fallocate inode
2867  * operation, which is called from the sys_fallocate() system call.
2868  * For block-mapped files, posix_fallocate should fall back to the method
2869  * of writing zeroes to the required new blocks (the same behavior that is
2870  * expected of file systems which do not support the fallocate() system call).
2871  */
2872 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
2873 {
2874         handle_t *handle;
2875         ext4_lblk_t block;
2876         loff_t new_size;
2877         unsigned long max_blocks;
2878         int ret = 0;
2879         int ret2 = 0;
2880         int retries = 0;
2881         struct buffer_head map_bh;
2882         unsigned int credits, blkbits = inode->i_blkbits;
2883
2884         /*
2885          * (pre)allocate mode is currently supported for extent-based
2886          * files _only_
2887          */
2888         if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
2889                 return -EOPNOTSUPP;
2890
2891         /* preallocation to directories is currently not supported */
2892         if (S_ISDIR(inode->i_mode))
2893                 return -ENODEV;
2894
2895         block = offset >> blkbits;
2896         /*
2897          * We can't just convert len to max_blocks: with blocksize = 4096,
2898          * offset = 3072 and len = 2048 the range still spans two blocks
2899          */
2900         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
2901                                                         - block;
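        /*
         * e.g. with blkbits = 12 (4096-byte blocks), offset = 3072 and
         * len = 2048: block = 3072 >> 12 = 0 and
         * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 8192 >> 12 = 2, so
         * max_blocks = 2, whereas len >> blkbits alone would give 0.
         */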
2902         /*
2903          * credits to insert 1 extent into extent tree + buffers to be able to
2904          * modify 1 super block, 1 block bitmap and 1 group descriptor.
2905          */
2906         credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
2907         mutex_lock(&inode->i_mutex);
2908 retry:
2909         while (ret >= 0 && ret < max_blocks) {
2910                 block = block + ret;
2911                 max_blocks = max_blocks - ret;
2912                 handle = ext4_journal_start(inode, credits);
2913                 if (IS_ERR(handle)) {
2914                         ret = PTR_ERR(handle);
2915                         break;
2916                 }
2917                 ret = ext4_get_blocks_wrap(handle, inode, block,
2918                                           max_blocks, &map_bh,
2919                                           EXT4_CREATE_UNINITIALIZED_EXT, 0);
2920                 if (ret <= 0) {
2921 #ifdef EXT4FS_DEBUG
2922                         WARN_ON(ret <= 0);
2923                         printk(KERN_ERR "%s: ext4_ext_get_blocks "
2924                                     "returned error inode#%lu, block=%u, "
2925                                     "max_blocks=%lu\n", __func__,
2926                                     inode->i_ino, block, max_blocks);
2927 #endif
2928                         ext4_mark_inode_dirty(handle, inode);
2929                         ret2 = ext4_journal_stop(handle);
2930                         break;
2931                 }
2932                 if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
2933                                                 blkbits) >> blkbits))
2934                         new_size = offset + len;
2935                 else
2936                         new_size = ((loff_t)block + ret) << blkbits;
2937
2938                 ext4_falloc_update_inode(inode, mode, new_size,
2939                                                 buffer_new(&map_bh));
2940                 ext4_mark_inode_dirty(handle, inode);
2941                 ret2 = ext4_journal_stop(handle);
2942                 if (ret2)
2943                         break;
2944         }
2945         if (ret == -ENOSPC &&
2946                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
2947                 ret = 0;
2948                 goto retry;
2949         }
2950         mutex_unlock(&inode->i_mutex);
2951         return ret > 0 ? ret2 : ret;
2952 }
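/*
 * For illustration, a minimal user-space sketch of how this function is
 * reached (the path name is hypothetical; on libcs without a fallocate()
 * wrapper the raw syscall would be used instead).  Both calls below enter
 * ext4_fallocate() via sys_fallocate() on an extent-based ext4 file:
 *
 *	int fd = open("/mnt/ext4/data", O_RDWR | O_CREAT, 0644);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);   keeps i_size
 *	fallocate(fd, 0, 0, 16 << 20);                      extends i_size
 *
 * On filesystems without fallocate() support, posix_fallocate() falls
 * back to writing zeroes, as noted in the comment above ext4_fallocate().
 */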