1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
17  */
18
19
20 /*
21  * mballoc.c contains the multiblock allocation routines
22  */
23
24 #include "mballoc.h"
25 /*
26  * MUSTDO:
27  *   - test ext4_ext_search_left() and ext4_ext_search_right()
28  *   - search for metadata in few groups
29  *
30  * TODO v4:
31  *   - normalization should take into account whether file is still open
32  *   - discard preallocations if no free space left (policy?)
33  *   - don't normalize tails
34  *   - quota
35  *   - reservation for superuser
36  *
37  * TODO v3:
38  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
39  *   - track min/max extents in each group for better group selection
40  *   - mb_mark_used() may allocate chunk right after splitting buddy
41  *   - tree of groups sorted by number of free blocks
42  *   - error handling
43  */
44
45 /*
46  * An allocation request involves a request for multiple blocks near
47  * to the specified goal (block) value.
48  *
49  * During the initialization phase of the allocator we decide to use the
50  * group preallocation or inode preallocation depending on the size of the
51  * file. The size of the file could be the resulting file size we would
52  * have after allocation, or the current file size, whichever is larger.
53  * If the size is less than sbi->s_mb_stream_request we select group
54  * preallocation. The default value of s_mb_stream_request is 16
55  * blocks. This can also be tuned via
56  * /proc/fs/ext4/<partition>/stream_req. The value is represented in
57  * terms of number of blocks.
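 *
 * In sketch form (an illustration only; the decision logic itself is not
 * part of this excerpt):
 *
 *	size = max(size after allocation, current size);   (in blocks)
 *	if (size < sbi->s_mb_stream_request)
 *		use group (locality) preallocation;
 *	else
 *		use inode preallocation;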
58  *
59  * The main motivation for having small files use group preallocation is
60  * to keep small files close together on the disk.
61  *
62  * In the first stage the allocator looks at the inode prealloc list,
63  * ext4_inode_info->i_prealloc_list, which contains the prealloc spaces
64  * for this particular inode. The inode prealloc space is represented as:
65  *
66  * pa_lstart -> the logical start block for this prealloc space
67  * pa_pstart -> the physical start block for this prealloc space
68  * pa_len    -> length for this prealloc space
69  * pa_free   -> free space available in this prealloc space
70  *
71  * The inode preallocation space is used looking at the _logical_ start
72  * block. Only if the logical file block falls within the range of the
73  * prealloc space do we consume it. This makes sure that we have
74  * contiguous physical blocks representing the file blocks.
75  *
76  * The important thing to note about inode prealloc space is that we
77  * don't modify the values associated with the inode prealloc space
78  * except pa_free.
79  *
80  * If we are not able to find blocks in the inode prealloc space and if we
81  * have the group allocation flag set then we look at the locality group
82  * prealloc space. These are per-CPU prealloc lists represented as
83  *
84  * ext4_sb_info.s_locality_groups[smp_processor_id()]
85  *
86  * The reason for having a per-CPU locality group is to reduce contention
87  * between CPUs. It is possible to get scheduled at this point.
88  *
89  * The locality group prealloc space is used looking at whether we have
90  * enough free space (pa_free) within the prealloc space.
91  *
92  * If we can't allocate blocks via inode prealloc and/or locality group
93  * prealloc then we look at the buddy cache. The buddy cache is represented
94  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
95  * mapped to the buddy and bitmap information regarding different
96  * groups. The buddy information is attached to the buddy cache inode so
97  * that we can access it through the page cache. The information regarding
98  * each group is loaded via ext4_mb_load_buddy.  It involves the
99  * block bitmap and the buddy information, which are stored in the
100  * inode as:
101  *
102  *  {                        page                        }
103  *  [ group 0 bitmap][ group 0 buddy] [group 1 bitmap][ group 1 buddy]...
104  *
105  *
106  * one block each for bitmap and buddy information.  So for each group we
107  * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
108  * blocksize) blocks.  So it can have information regarding
109  * groups_per_page groups, which is blocks_per_page/2.
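 *
 * For example (illustrative sizes): with PAGE_CACHE_SIZE = 4096 and
 * blocksize = 1024, blocks_per_page = 4 and groups_per_page = 2, so the
 * first page holds, in order, group 0's bitmap, group 0's buddy, group
 * 1's bitmap and group 1's buddy; in general group g's bitmap lives at
 * block 2*g of the buddy cache inode and its buddy at block 2*g + 1
 * (see ext4_mb_load_buddy below).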
110  *
111  * The buddy cache inode is not stored on disk. The inode is thrown
112  * away when the filesystem is unmounted.
113  *
114  * We look for count number of blocks in the buddy cache. If we were able
115  * to locate that many free blocks we return with additional information
116  * regarding the rest of the contiguous physical blocks available.
117  *
118  * Before allocating blocks via the buddy cache we normalize the request
119  * blocks. This ensures we ask for more blocks than we need. The extra
120  * blocks that we get after allocation are added to the respective prealloc
121  * list. In case of inode preallocation we follow a list of heuristics
122  * based on file size. This can be found in ext4_mb_normalize_request. If
123  * we are doing a group prealloc we try to normalize the request to
124  * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
125  * 512 blocks. This can be tuned via
126  * /proc/fs/ext4/<partition>/group_prealloc. The value is represented in
127  * terms of number of blocks. If we have mounted the file system with the
128  * -o stripe=<value> option, the group prealloc request is normalized to
129  * the stripe value (sbi->s_stripe).
130  *
131  * The regular allocator (using the buddy cache) supports a few tunables.
132  *
133  * /proc/fs/ext4/<partition>/min_to_scan
134  * /proc/fs/ext4/<partition>/max_to_scan
135  * /proc/fs/ext4/<partition>/order2_req
136  *
137  * The regular allocator uses the buddy scan only if the request len is a
138  * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
139  * The value of s_mb_order2_reqs can be tuned via
140  * /proc/fs/ext4/<partition>/order2_req.  If the request len is equal to
141  * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
142  * in stripe-size units. This should result in better allocation on RAID
143  * setups. If not, we search in the specific group using the bitmap for
144  * best extents. The tunables min_to_scan and max_to_scan control the
145  * behaviour here. min_to_scan indicates how long mballoc __must__ look
146  * for a best extent and max_to_scan indicates how long mballoc __can__
147  * look for a best extent among the found extents. Searching for blocks
148  * starts with the group specified as the goal value in the allocation
149  * context via ac_g_ex. Each group is first checked based on the criteria
150  * of whether it can be used for allocation. ext4_mb_good_group explains
151  * how the groups are checked.
152  *
153  * Both prealloc spaces are populated as above. So for the first
154  * request we will hit the buddy cache, which will result in the prealloc
155  * space getting filled. The prealloc space is then later used for
156  * subsequent requests.
157  */
158
159 /*
160  * mballoc operates on the following data:
161  *  - on-disk bitmap
162  *  - in-core buddy (actually includes buddy and bitmap)
163  *  - preallocation descriptors (PAs)
164  *
165  * there are two types of preallocations:
166  *  - inode
167  *    assigned to a specific inode and can be used for this inode only.
168  *    it describes part of the inode's space preallocated to specific
169  *    physical blocks. any block from that preallocation can be used
170  *    independently. the descriptor just tracks the number of blocks left
171  *    unused. so, before taking some block from the descriptor, one must
172  *    make sure the corresponding logical block isn't allocated yet. this
173  *    also means that freeing any block within the descriptor's range
174  *    must discard all preallocated blocks.
175  *  - locality group
176  *    assigned to a specific locality group which does not translate to a
177  *    permanent set of inodes: an inode can join and leave the group. space
178  *    from this type of preallocation can be used for any inode. thus
179  *    it's consumed from the beginning to the end.
180  *
181  * relation between them can be expressed as:
182  *    in-core buddy = on-disk bitmap + preallocation descriptors
183  *
184  * this means the blocks mballoc considers used are:
185  *  - allocated blocks (persistent)
186  *  - preallocated blocks (non-persistent)
187  *
188  * consistency in mballoc world means that at any time a block is either
189  * free or used in ALL structures. notice: "any time" should not be read
190  * literally -- time is discrete and delimited by locks.
191  *
192  *  to keep it simple, we don't use block numbers, instead we count the
193  *  number of blocks marked used/free in the on-disk bitmap, buddy and PA.
194  *
195  * all operations can be expressed as:
196  *  - init buddy:                       buddy = on-disk + PAs
197  *  - new PA:                           buddy += N; PA = N
198  *  - use inode PA:                     on-disk += N; PA -= N
199  *  - discard inode PA                  buddy -= on-disk - PA; PA = 0
200  *  - use locality group PA             on-disk += N; PA -= N
201  *  - discard locality group PA         buddy -= PA; PA = 0
202  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
203  *        is used in real operation because we can't know actual used
204  *        bits from PA, only from on-disk bitmap
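 *
 *  a small worked example with one locality group PA (illustrative
 *  numbers): initializing the buddy from an on-disk bitmap with 10 used
 *  blocks plus a 4-block PA gives buddy = 14, PA = 4. using 3 blocks from
 *  the PA marks them on-disk: on-disk = 13, PA = 1. discarding the PA then
 *  frees its unused block in the buddy: buddy -= PA, so buddy = 13,
 *  PA = 0, and buddy equals on-disk again.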
205  *
206  * if we follow this strict logic, then all operations above should be atomic.
207  * given some of them can block, we'd have to use something like semaphores
208  * killing performance on high-end SMP hardware. let's try to relax it using
209  * the following knowledge:
210  *  1) if buddy is referenced, it's already initialized
211  *  2) while block is used in buddy and the buddy is referenced,
212  *     nobody can re-allocate that block
213  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
214  *     a bit set and PA claims the same block, it's OK. IOW, one can set a
215  *     bit in the on-disk bitmap if the buddy has the same bit set and/or
216  *     PA covers the corresponding block
217  *
218  * so, now we're building a concurrency table:
219  *  - init buddy vs.
220  *    - new PA
221  *      blocks for PA are allocated in the buddy, buddy must be referenced
222  *      until PA is linked to allocation group to avoid concurrent buddy init
223  *    - use inode PA
224  *      we need to make sure that either on-disk bitmap or PA has uptodate data
225  *      given (3) we care that PA-=N operation doesn't interfere with init
226  *    - discard inode PA
227  *      the simplest way would be to have buddy initialized by the discard
228  *    - use locality group PA
229  *      again PA-=N must be serialized with init
230  *    - discard locality group PA
231  *      the simplest way would be to have buddy initialized by the discard
232  *  - new PA vs.
233  *    - use inode PA
234  *      i_data_sem serializes them
235  *    - discard inode PA
236  *      discard process must wait until PA isn't used by another process
237  *    - use locality group PA
238  *      some mutex should serialize them
239  *    - discard locality group PA
240  *      discard process must wait until PA isn't used by another process
241  *  - use inode PA
242  *    - use inode PA
243  *      i_data_sem or another mutex should serialize them
244  *    - discard inode PA
245  *      discard process must wait until PA isn't used by another process
246  *    - use locality group PA
247  *      nothing wrong here -- they're different PAs covering different blocks
248  *    - discard locality group PA
249  *      discard process must wait until PA isn't used by another process
250  *
251  * now we're ready to draw a few conclusions:
252  *  - while a PA is referenced, no discard is possible
253  *  - a PA is referenced until its blocks are marked in the on-disk bitmap
254  *  - PA changes only after on-disk bitmap
255  *  - discard must not compete with init. either init is done before
256  *    any discard or they're serialized somehow
257  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
258  *
259  * a special case is when we've used a PA to emptiness. no need to modify
260  * the buddy in this case, but we should care about concurrent init
261  *
262  */
263
264 /*
265  * Logic in a few words:
266  *
267  *  - allocation:
268  *    load group
269  *    find blocks
270  *    mark bits in on-disk bitmap
271  *    release group
272  *
273  *  - use preallocation:
274  *    find proper PA (per-inode or group)
275  *    load group
276  *    mark bits in on-disk bitmap
277  *    release group
278  *    release PA
279  *
280  *  - free:
281  *    load group
282  *    mark bits in on-disk bitmap
283  *    release group
284  *
285  *  - discard preallocations in group:
286  *    mark PAs deleted
287  *    move them onto local list
288  *    load on-disk bitmap
289  *    load group
290  *    remove PA from object (inode or locality group)
291  *    mark free blocks in-core
292  *
293  *  - discard inode's preallocations: as above, from the inode's PA list
294  */
295
296 /*
297  * Locking rules
298  *
299  * Locks:
300  *  - bitlock on a group        (group)
301  *  - object (inode/locality)   (object)
302  *  - per-pa lock               (pa)
303  *
304  * Paths:
305  *  - new pa
306  *    object
307  *    group
308  *
309  *  - find and use pa:
310  *    pa
311  *
312  *  - release consumed pa:
313  *    pa
314  *    group
315  *    object
316  *
317  *  - generate in-core bitmap:
318  *    group
319  *        pa
320  *
321  *  - discard all for given object (inode, locality group):
322  *    object
323  *        pa
324  *    group
325  *
326  *  - discard all for given group:
327  *    group
328  *        pa
329  *    group
330  *        object
331  *
332  */
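/*
 * For illustration, the "new pa" path above in sketch form (hypothetical
 * pseudo-code; the real lock and list calls live in helpers not shown in
 * this excerpt):
 *
 *	spin_lock(object_lock);			object: inode or locality group
 *	add pa to the object's prealloc list;
 *	spin_unlock(object_lock);
 *
 *	ext4_lock_group(sb, group);		group bitlock
 *	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
 *	ext4_unlock_group(sb, group);
 */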
333
334 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
335 {
336 #if BITS_PER_LONG == 64
337         *bit += ((unsigned long) addr & 7UL) << 3;
338         addr = (void *) ((unsigned long) addr & ~7UL);
339 #elif BITS_PER_LONG == 32
340         *bit += ((unsigned long) addr & 3UL) << 3;
341         addr = (void *) ((unsigned long) addr & ~3UL);
342 #else
343 #error "how many bits you are?!"
344 #endif
345         return addr;
346 }
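/*
 * For illustration (example values): on a 64-bit machine a call with
 * addr = ...0x13 and *bit = 5 rounds addr down to ...0x10 and compensates
 * with *bit = 5 + 3 * 8 = 29, so the same bit in memory is addressed, now
 * through an unsigned-long-aligned pointer.
 */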
347
348 static inline int mb_test_bit(int bit, void *addr)
349 {
350         /*
351          * ext4_test_bit on architecture like powerpc
352          * needs unsigned long aligned address
353          */
354         addr = mb_correct_addr_and_bit(&bit, addr);
355         return ext4_test_bit(bit, addr);
356 }
357
358 static inline void mb_set_bit(int bit, void *addr)
359 {
360         addr = mb_correct_addr_and_bit(&bit, addr);
361         ext4_set_bit(bit, addr);
362 }
363
364 static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
365 {
366         addr = mb_correct_addr_and_bit(&bit, addr);
367         ext4_set_bit_atomic(lock, bit, addr);
368 }
369
370 static inline void mb_clear_bit(int bit, void *addr)
371 {
372         addr = mb_correct_addr_and_bit(&bit, addr);
373         ext4_clear_bit(bit, addr);
374 }
375
376 static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
377 {
378         addr = mb_correct_addr_and_bit(&bit, addr);
379         ext4_clear_bit_atomic(lock, bit, addr);
380 }
381
382 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
383 {
384         int fix = 0, ret, tmpmax;
385         addr = mb_correct_addr_and_bit(&fix, addr);
386         tmpmax = max + fix;
387         start += fix;
388
389         ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
390         if (ret > max)
391                 return max;
392         return ret;
393 }
394
395 static inline int mb_find_next_bit(void *addr, int max, int start)
396 {
397         int fix = 0, ret, tmpmax;
398         addr = mb_correct_addr_and_bit(&fix, addr);
399         tmpmax = max + fix;
400         start += fix;
401
402         ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
403         if (ret > max)
404                 return max;
405         return ret;
406 }
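/*
 * A typical use of the two helpers above is walking the free extents of a
 * bitmap, as ext4_mb_generate_buddy() does later in this file (sketch):
 *
 *	i = mb_find_next_zero_bit(bitmap, max, 0);
 *	while (i < max) {
 *		first = i;                            (a free extent starts)
 *		i = mb_find_next_bit(bitmap, max, i); (and ends before i)
 *		... record free extent [first, i) of length i - first ...
 *		if (i < max)
 *			i = mb_find_next_zero_bit(bitmap, max, i);
 *	}
 */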
407
408 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
409 {
410         char *bb;
411
412         BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
413         BUG_ON(max == NULL);
414
415         if (order > e4b->bd_blkbits + 1) {
416                 *max = 0;
417                 return NULL;
418         }
419
420         /* at order 0 we see each particular block */
421         *max = 1 << (e4b->bd_blkbits + 3);
422         if (order == 0)
423                 return EXT4_MB_BITMAP(e4b);
424
425         bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
426         *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
427
428         return bb;
429 }
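/*
 * Example (illustrative sizes): with a 4KB block size bd_blkbits = 12, so
 * order 0 returns the block bitmap itself with max = 1 << 15 bits, one bit
 * per block in the group.  Higher orders return a slice of the buddy block
 * at the precomputed s_mb_offsets[order], with s_mb_maxs[order] valid
 * bits, each bit standing for a chunk of 2^order blocks.
 */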
430
431 #ifdef DOUBLE_CHECK
432 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
433                            int first, int count)
434 {
435         int i;
436         struct super_block *sb = e4b->bd_sb;
437
438         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
439                 return;
440         BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
441         for (i = 0; i < count; i++) {
442                 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
443                         ext4_fsblk_t blocknr;
444                         blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
445                         blocknr += first + i;
446                         blocknr +=
447                             le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
448
449                         ext4_error(sb, __func__, "double-free of inode"
450                                    " %lu's block %llu(bit %u in group %u)",
451                                    inode ? inode->i_ino : 0, blocknr,
452                                    first + i, e4b->bd_group);
453                 }
454                 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
455         }
456 }
457
458 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
459 {
460         int i;
461
462         if (unlikely(e4b->bd_info->bb_bitmap == NULL))
463                 return;
464         BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
465         for (i = 0; i < count; i++) {
466                 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
467                 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
468         }
469 }
470
471 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
472 {
473         if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
474                 unsigned char *b1, *b2;
475                 int i;
476                 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
477                 b2 = (unsigned char *) bitmap;
478                 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
479                         if (b1[i] != b2[i]) {
480                                 printk(KERN_ERR "corruption in group %u "
481                                        "at byte %u(%u): %x in copy != %x "
482                                        "on disk/prealloc\n",
483                                        e4b->bd_group, i, i * 8, b1[i], b2[i]);
484                                 BUG();
485                         }
486                 }
487         }
488 }
489
490 #else
491 static inline void mb_free_blocks_double(struct inode *inode,
492                                 struct ext4_buddy *e4b, int first, int count)
493 {
494         return;
495 }
496 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
497                                                 int first, int count)
498 {
499         return;
500 }
501 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
502 {
503         return;
504 }
505 #endif
506
507 #ifdef AGGRESSIVE_CHECK
508
509 #define MB_CHECK_ASSERT(assert)                                         \
510 do {                                                                    \
511         if (!(assert)) {                                                \
512                 printk(KERN_EMERG                                       \
513                         "Assertion failure in %s() at %s:%d: \"%s\"\n", \
514                         function, file, line, # assert);                \
515                 BUG();                                                  \
516         }                                                               \
517 } while (0)
518
519 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
520                                 const char *function, int line)
521 {
522         struct super_block *sb = e4b->bd_sb;
523         int order = e4b->bd_blkbits + 1;
524         int max;
525         int max2;
526         int i;
527         int j;
528         int k;
529         int count;
530         struct ext4_group_info *grp;
531         int fragments = 0;
532         int fstart;
533         struct list_head *cur;
534         void *buddy;
535         void *buddy2;
536
537         {
538                 static int mb_check_counter;
539                 if (mb_check_counter++ % 100 != 0)
540                         return 0;
541         }
542
543         while (order > 1) {
544                 buddy = mb_find_buddy(e4b, order, &max);
545                 MB_CHECK_ASSERT(buddy);
546                 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
547                 MB_CHECK_ASSERT(buddy2);
548                 MB_CHECK_ASSERT(buddy != buddy2);
549                 MB_CHECK_ASSERT(max * 2 == max2);
550
551                 count = 0;
552                 for (i = 0; i < max; i++) {
553
554                         if (mb_test_bit(i, buddy)) {
555                                 /* only single bit in buddy2 may be 1 */
556                                 if (!mb_test_bit(i << 1, buddy2)) {
557                                         MB_CHECK_ASSERT(
558                                                 mb_test_bit((i<<1)+1, buddy2));
559                                 } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
560                                         MB_CHECK_ASSERT(
561                                                 mb_test_bit(i << 1, buddy2));
562                                 }
563                                 continue;
564                         }
565
566                         /* both bits in buddy2 must be 0 */
567                         MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
568                         MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
569
570                         for (j = 0; j < (1 << order); j++) {
571                                 k = (i * (1 << order)) + j;
572                                 MB_CHECK_ASSERT(
573                                         !mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
574                         }
575                         count++;
576                 }
577                 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
578                 order--;
579         }
580
581         fstart = -1;
582         buddy = mb_find_buddy(e4b, 0, &max);
583         for (i = 0; i < max; i++) {
584                 if (!mb_test_bit(i, buddy)) {
585                         MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
586                         if (fstart == -1) {
587                                 fragments++;
588                                 fstart = i;
589                         }
590                         continue;
591                 }
592                 fstart = -1;
593                 /* check used bits only */
594                 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
595                         buddy2 = mb_find_buddy(e4b, j, &max2);
596                         k = i >> j;
597                         MB_CHECK_ASSERT(k < max2);
598                         MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
599                 }
600         }
601         MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
602         MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
603
604         grp = ext4_get_group_info(sb, e4b->bd_group);
605         buddy = mb_find_buddy(e4b, 0, &max);
606         list_for_each(cur, &grp->bb_prealloc_list) {
607                 ext4_group_t groupnr;
608                 struct ext4_prealloc_space *pa;
609                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
610                 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
611                 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
612                 for (i = 0; i < pa->pa_len; i++)
613                         MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
614         }
615         return 0;
616 }
617 #undef MB_CHECK_ASSERT
618 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
619                                         __FILE__, __func__, __LINE__)
620 #else
621 #define mb_check_buddy(e4b)
622 #endif
623
624 /* FIXME!! need more doc */
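/*
 * In short (a reading of the code below): mark the free range
 * [first, first + len) in the buddy bitmaps.  The range is split into
 * power-of-two chunks whose order is limited both by the alignment of
 * 'first' (the ffs(first | border) term) and by the remaining length
 * (the fls(len) term).  For each chunk the covering bit at that order is
 * cleared and grp->bb_counters[order] is bumped; order-0 chunks are only
 * counted, since single free blocks are already visible in the block
 * bitmap itself.
 */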
625 static void ext4_mb_mark_free_simple(struct super_block *sb,
626                                 void *buddy, unsigned first, int len,
627                                         struct ext4_group_info *grp)
628 {
629         struct ext4_sb_info *sbi = EXT4_SB(sb);
630         unsigned short min;
631         unsigned short max;
632         unsigned short chunk;
633         unsigned short border;
634
635         BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));
636
637         border = 2 << sb->s_blocksize_bits;
638
639         while (len > 0) {
640                 /* find how many blocks can be covered since this position */
641                 max = ffs(first | border) - 1;
642
643                 /* find how many blocks of power 2 we need to mark */
644                 min = fls(len) - 1;
645
646                 if (max < min)
647                         min = max;
648                 chunk = 1 << min;
649
650                 /* mark multiblock chunks only */
651                 grp->bb_counters[min]++;
652                 if (min > 0)
653                         mb_clear_bit(first >> min,
654                                      buddy + sbi->s_mb_offsets[min]);
655
656                 len -= chunk;
657                 first += chunk;
658         }
659 }
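/*
 * Worked example (illustrative numbers, block size large enough not to
 * cap the order): for first = 5, len = 11 the loop above marks chunks of
 * sizes 1 (at block 5), 2 (at 6) and 8 (at 8), i.e. orders 0, 1 and 3,
 * bumping bb_counters[0], [1] and [3] once each and ending with
 * first = 16, len = 0.
 */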
660
661 static void ext4_mb_generate_buddy(struct super_block *sb,
662                                 void *buddy, void *bitmap, ext4_group_t group)
663 {
664         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
665         unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
666         unsigned short i = 0;
667         unsigned short first;
668         unsigned short len;
669         unsigned free = 0;
670         unsigned fragments = 0;
671         unsigned long long period = get_cycles();
672
673         /* initialize buddy from bitmap which is aggregation
674          * of on-disk bitmap and preallocations */
675         i = mb_find_next_zero_bit(bitmap, max, 0);
676         grp->bb_first_free = i;
677         while (i < max) {
678                 fragments++;
679                 first = i;
680                 i = mb_find_next_bit(bitmap, max, i);
681                 len = i - first;
682                 free += len;
683                 if (len > 1)
684                         ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
685                 else
686                         grp->bb_counters[0]++;
687                 if (i < max)
688                         i = mb_find_next_zero_bit(bitmap, max, i);
689         }
690         grp->bb_fragments = fragments;
691
692         if (free != grp->bb_free) {
693                 ext4_error(sb, __func__,
694                         "EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
695                         group, free, grp->bb_free);
696                 /*
697                  * If we intend to continue, we consider the group descriptor
698                  * corrupt and update bb_free using the bitmap value
699                  */
700                 grp->bb_free = free;
701         }
702
703         clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
704
705         period = get_cycles() - period;
706         spin_lock(&EXT4_SB(sb)->s_bal_lock);
707         EXT4_SB(sb)->s_mb_buddies_generated++;
708         EXT4_SB(sb)->s_mb_generation_time += period;
709         spin_unlock(&EXT4_SB(sb)->s_bal_lock);
710 }
711
712 /* The buddy information is attached to the buddy cache inode
713  * for convenience. The information regarding each group
714  * is loaded via ext4_mb_load_buddy. It involves the
715  * block bitmap and the buddy information, which are
716  * stored in the inode as
717  *
718  * {                        page                        }
719  * [ group 0 bitmap][ group 0 buddy] [group 1 bitmap][ group 1 buddy]...
720  *
721  *
722  * one block each for bitmap and buddy information.
723  * So for each group we take up 2 blocks. A page can
724  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
725  * So it can have information regarding groups_per_page which
726  * is blocks_per_page/2
727  */
728
729 static int ext4_mb_init_cache(struct page *page, char *incore)
730 {
731         int blocksize;
732         int blocks_per_page;
733         int groups_per_page;
734         int err = 0;
735         int i;
736         ext4_group_t first_group;
737         int first_block;
738         struct super_block *sb;
739         struct buffer_head *bhs;
740         struct buffer_head **bh;
741         struct inode *inode;
742         char *data;
743         char *bitmap;
744
745         mb_debug("init page %lu\n", page->index);
746
747         inode = page->mapping->host;
748         sb = inode->i_sb;
749         blocksize = 1 << inode->i_blkbits;
750         blocks_per_page = PAGE_CACHE_SIZE / blocksize;
751
752         groups_per_page = blocks_per_page >> 1;
753         if (groups_per_page == 0)
754                 groups_per_page = 1;
755
756         /* allocate buffer_heads to read bitmaps */
757         if (groups_per_page > 1) {
758                 err = -ENOMEM;
759                 i = sizeof(struct buffer_head *) * groups_per_page;
760                 bh = kzalloc(i, GFP_NOFS);
761                 if (bh == NULL)
762                         goto out;
763         } else
764                 bh = &bhs;
765
766         first_group = page->index * blocks_per_page / 2;
767
768         /* read all groups the page covers into the cache */
769         for (i = 0; i < groups_per_page; i++) {
770                 struct ext4_group_desc *desc;
771
772                 if (first_group + i >= EXT4_SB(sb)->s_groups_count)
773                         break;
774
775                 err = -EIO;
776                 desc = ext4_get_group_desc(sb, first_group + i, NULL);
777                 if (desc == NULL)
778                         goto out;
779
780                 err = -ENOMEM;
781                 bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
782                 if (bh[i] == NULL)
783                         goto out;
784
785                 if (buffer_uptodate(bh[i]) &&
786                     !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
787                         continue;
788
789                 lock_buffer(bh[i]);
790                 spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
791                 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
792                         ext4_init_block_bitmap(sb, bh[i],
793                                                 first_group + i, desc);
794                         set_buffer_uptodate(bh[i]);
795                         unlock_buffer(bh[i]);
796                         spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
797                         continue;
798                 }
799                 spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
800                 get_bh(bh[i]);
801                 bh[i]->b_end_io = end_buffer_read_sync;
802                 submit_bh(READ, bh[i]);
803                 mb_debug("read bitmap for group %u\n", first_group + i);
804         }
805
806         /* wait for I/O completion */
807         for (i = 0; i < groups_per_page && bh[i]; i++)
808                 wait_on_buffer(bh[i]);
809
810         err = -EIO;
811         for (i = 0; i < groups_per_page && bh[i]; i++)
812                 if (!buffer_uptodate(bh[i]))
813                         goto out;
814
815         err = 0;
816         first_block = page->index * blocks_per_page;
817         for (i = 0; i < blocks_per_page; i++) {
818                 int group;
819                 struct ext4_group_info *grinfo;
820
821                 group = (first_block + i) >> 1;
822                 if (group >= EXT4_SB(sb)->s_groups_count)
823                         break;
824
825                 /*
826                  * data carries information regarding this
827                  * particular group in the format specified
828                  * above
829                  *
830                  */
831                 data = page_address(page) + (i * blocksize);
832                 bitmap = bh[group - first_group]->b_data;
833
834                 /*
835                  * We place the buddy block and bitmap block
836                  * close together
837                  */
838                 if ((first_block + i) & 1) {
839                         /* this is a buddy block */
840                         BUG_ON(incore == NULL);
841                         mb_debug("put buddy for group %u in page %lu/%x\n",
842                                 group, page->index, i * blocksize);
843                         memset(data, 0xff, blocksize);
844                         grinfo = ext4_get_group_info(sb, group);
845                         grinfo->bb_fragments = 0;
846                         memset(grinfo->bb_counters, 0,
847                                sizeof(unsigned short)*(sb->s_blocksize_bits+2));
848                         /*
849                          * incore got set to the group block bitmap below
850                          */
851                         ext4_mb_generate_buddy(sb, data, incore, group);
852                         incore = NULL;
853                 } else {
854                         /* this is a bitmap block */
855                         BUG_ON(incore != NULL);
856                         mb_debug("put bitmap for group %u in page %lu/%x\n",
857                                 group, page->index, i * blocksize);
858
859                         /* see comments in ext4_mb_put_pa() */
860                         ext4_lock_group(sb, group);
861                         memcpy(data, bitmap, blocksize);
862
863                         /* mark all preallocated blks used in in-core bitmap */
864                         ext4_mb_generate_from_pa(sb, data, group);
865                         ext4_unlock_group(sb, group);
866
867                         /* set incore so that the buddy information can be
868                          * generated using this
869                          */
870                         incore = data;
871                 }
872         }
873         SetPageUptodate(page);
874
875 out:
876         if (bh) {
877                 for (i = 0; i < groups_per_page && bh[i]; i++)
878                         brelse(bh[i]);
879                 if (bh != &bhs)
880                         kfree(bh);
881         }
882         return err;
883 }
884
885 static noinline_for_stack int
886 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
887                                         struct ext4_buddy *e4b)
888 {
889         struct ext4_sb_info *sbi = EXT4_SB(sb);
890         struct inode *inode = sbi->s_buddy_cache;
891         int blocks_per_page;
892         int block;
893         int pnum;
894         int poff;
895         struct page *page;
896         int ret;
897
898         mb_debug("load group %u\n", group);
899
900         blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
901
902         e4b->bd_blkbits = sb->s_blocksize_bits;
903         e4b->bd_info = ext4_get_group_info(sb, group);
904         e4b->bd_sb = sb;
905         e4b->bd_group = group;
906         e4b->bd_buddy_page = NULL;
907         e4b->bd_bitmap_page = NULL;
908
909         /*
910          * the buddy cache inode stores the block bitmap
911          * and buddy information in consecutive blocks.
912          * So for each group we need two blocks.
913          */
914         block = group * 2;
915         pnum = block / blocks_per_page;
916         poff = block % blocks_per_page;
917
918         /* we could use find_or_create_page(), but it locks the page,
919          * which we'd like to avoid in the fast path ... */
920         page = find_get_page(inode->i_mapping, pnum);
921         if (page == NULL || !PageUptodate(page)) {
922                 if (page)
923                         page_cache_release(page);
924                 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
925                 if (page) {
926                         BUG_ON(page->mapping != inode->i_mapping);
927                         if (!PageUptodate(page)) {
928                                 ret = ext4_mb_init_cache(page, NULL);
929                                 if (ret) {
930                                         unlock_page(page);
931                                         goto err;
932                                 }
933                                 mb_cmp_bitmaps(e4b, page_address(page) +
934                                                (poff * sb->s_blocksize));
935                         }
936                         unlock_page(page);
937                 }
938         }
939         if (page == NULL || !PageUptodate(page)) {
940                 ret = -EIO;
941                 goto err;
942         }
943         e4b->bd_bitmap_page = page;
944         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
945         mark_page_accessed(page);
946
947         block++;
948         pnum = block / blocks_per_page;
949         poff = block % blocks_per_page;
950
951         page = find_get_page(inode->i_mapping, pnum);
952         if (page == NULL || !PageUptodate(page)) {
953                 if (page)
954                         page_cache_release(page);
955                 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
956                 if (page) {
957                         BUG_ON(page->mapping != inode->i_mapping);
958                         if (!PageUptodate(page)) {
959                                 ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
960                                 if (ret) {
961                                         unlock_page(page);
962                                         goto err;
963                                 }
964                         }
965                         unlock_page(page);
966                 }
967         }
968         if (page == NULL || !PageUptodate(page)) {
969                 ret = -EIO;
970                 goto err;
971         }
972         e4b->bd_buddy_page = page;
973         e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
974         mark_page_accessed(page);
975
976         BUG_ON(e4b->bd_bitmap_page == NULL);
977         BUG_ON(e4b->bd_buddy_page == NULL);
978
979         return 0;
980
981 err:
982         if (e4b->bd_bitmap_page)
983                 page_cache_release(e4b->bd_bitmap_page);
984         if (e4b->bd_buddy_page)
985                 page_cache_release(e4b->bd_buddy_page);
986         e4b->bd_buddy = NULL;
987         e4b->bd_bitmap = NULL;
988         return ret;
989 }
990
991 static void ext4_mb_release_desc(struct ext4_buddy *e4b)
992 {
993         if (e4b->bd_bitmap_page)
994                 page_cache_release(e4b->bd_bitmap_page);
995         if (e4b->bd_buddy_page)
996                 page_cache_release(e4b->bd_buddy_page);
997 }
998
999
1000 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1001 {
1002         int order = 1;
1003         void *bb;
1004
1005         BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
1006         BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1007
1008         bb = EXT4_MB_BUDDY(e4b);
1009         while (order <= e4b->bd_blkbits + 1) {
1010                 block = block >> 1;
1011                 if (!mb_test_bit(block, bb)) {
1012                         /* this block is part of buddy of order 'order' */
1013                         return order;
1014                 }
1015                 bb += 1 << (e4b->bd_blkbits - order);
1016                 order++;
1017         }
1018         return 0;
1019 }
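/*
 * In short (a reading of the code above): walk up through the buddy
 * bitmaps, halving the block number at each order; the first clear bit
 * found means the block lies inside a free buddy chunk of that order,
 * which is returned.  A return of 0 means the block is not inside any
 * higher-order free chunk.
 */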
1020
1021 static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1022 {
1023         __u32 *addr;
1024
1025         len = cur + len;
1026         while (cur < len) {
1027                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1028                         /* fast path: clear whole word at once */
1029                         addr = bm + (cur >> 3);
1030                         *addr = 0;
1031                         cur += 32;
1032                         continue;
1033                 }
1034                 mb_clear_bit_atomic(lock, cur, bm);
1035                 cur++;
1036         }
1037 }
1038
1039 static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1040 {
1041         __u32 *addr;
1042
1043         len = cur + len;
1044         while (cur < len) {
1045                 if ((cur & 31) == 0 && (len - cur) >= 32) {
1046                         /* fast path: set whole word at once */
1047                         addr = bm + (cur >> 3);
1048                         *addr = 0xffffffff;
1049                         cur += 32;
1050                         continue;
1051                 }
1052                 mb_set_bit_atomic(lock, cur, bm);
1053                 cur++;
1054         }
1055 }
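/*
 * Both helpers above use the same pattern: bits are set/cleared one at a
 * time (atomically, under the given lock) until cur reaches a 32-bit
 * boundary, then whole words are written at once.  E.g. (example values)
 * cur = 30, len = 40 is handled as bits 30-31 individually, then one word
 * write for bits 32-63, then bits 64-69 individually.
 */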
1056
1057 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1058                           int first, int count)
1059 {
1060         int block = 0;
1061         int max = 0;
1062         int order;
1063         void *buddy;
1064         void *buddy2;
1065         struct super_block *sb = e4b->bd_sb;
1066
1067         BUG_ON(first + count > (sb->s_blocksize << 3));
1068         BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
1069         mb_check_buddy(e4b);
1070         mb_free_blocks_double(inode, e4b, first, count);
1071
1072         e4b->bd_info->bb_free += count;
1073         if (first < e4b->bd_info->bb_first_free)
1074                 e4b->bd_info->bb_first_free = first;
1075
1076         /* let's maintain fragments counter */
1077         if (first != 0)
1078                 block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
1079         if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1080                 max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
1081         if (block && max)
1082                 e4b->bd_info->bb_fragments--;
1083         else if (!block && !max)
1084                 e4b->bd_info->bb_fragments++;
1085
1086         /* let's maintain buddy itself */
1087         while (count-- > 0) {
1088                 block = first++;
1089                 order = 0;
1090
1091                 if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
1092                         ext4_fsblk_t blocknr;
1093                         blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
1094                         blocknr += block;
1095                         blocknr +=
1096                             le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
1097                         ext4_unlock_group(sb, e4b->bd_group);
1098                         ext4_error(sb, __func__, "double-free of inode"
1099                                    " %lu's block %llu(bit %u in group %u)",
1100                                    inode ? inode->i_ino : 0, blocknr, block,
1101                                    e4b->bd_group);
1102                         ext4_lock_group(sb, e4b->bd_group);
1103                 }
1104                 mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
1105                 e4b->bd_info->bb_counters[order]++;
1106
1107                 /* start of the buddy */
1108                 buddy = mb_find_buddy(e4b, order, &max);
1109
1110                 do {
1111                         block &= ~1UL;
1112                         if (mb_test_bit(block, buddy) ||
1113                                         mb_test_bit(block + 1, buddy))
1114                                 break;
1115
1116                         /* both the buddies are free, try to coalesce them */
1117                         buddy2 = mb_find_buddy(e4b, order + 1, &max);
1118
1119                         if (!buddy2)
1120                                 break;
1121
1122                         if (order > 0) {
1123                                 /* for special purposes, we don't set
1124                                  * free bits in bitmap */
1125                                 mb_set_bit(block, buddy);
1126                                 mb_set_bit(block + 1, buddy);
1127                         }
1128                         e4b->bd_info->bb_counters[order]--;
1129                         e4b->bd_info->bb_counters[order]--;
1130
1131                         block = block >> 1;
1132                         order++;
1133                         e4b->bd_info->bb_counters[order]++;
1134
1135                         mb_clear_bit(block, buddy2);
1136                         buddy = buddy2;
1137                 } while (1);
1138         }
1139         mb_check_buddy(e4b);
1140 }
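/*
 * Coalescing example (illustrative numbers): freeing block 5 while block
 * 4 is already free clears bit 5 at order 0, then merges the pair: bit 2
 * is cleared in the order-1 buddy (the order-0 bits are left alone, see
 * the comment in the loop) and the scan retries at order 1; if blocks 6-7
 * are free as well, bits 2 and 3 are set at order 1 and bit 1 is cleared
 * at order 2.
 */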
1141
1142 static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1143                                 int needed, struct ext4_free_extent *ex)
1144 {
1145         int next = block;
1146         int max;
1147         int ord;
1148         void *buddy;
1149
1150         BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1151         BUG_ON(ex == NULL);
1152
1153         buddy = mb_find_buddy(e4b, order, &max);
1154         BUG_ON(buddy == NULL);
1155         BUG_ON(block >= max);
1156         if (mb_test_bit(block, buddy)) {
1157                 ex->fe_len = 0;
1158                 ex->fe_start = 0;
1159                 ex->fe_group = 0;
1160                 return 0;
1161         }
1162
1163         /* FIXME: drop order completely? */
1164         if (likely(order == 0)) {
1165                 /* find actual order */
1166                 order = mb_find_order_for_block(e4b, block);
1167                 block = block >> order;
1168         }
1169
1170         ex->fe_len = 1 << order;
1171         ex->fe_start = block << order;
1172         ex->fe_group = e4b->bd_group;
1173
1174         /* calc difference from given start */
1175         next = next - ex->fe_start;
1176         ex->fe_len -= next;
1177         ex->fe_start += next;
1178
1179         while (needed > ex->fe_len &&
1180                (buddy = mb_find_buddy(e4b, order, &max))) {
1181
1182                 if (block + 1 >= max)
1183                         break;
1184
1185                 next = (block + 1) * (1 << order);
1186                 if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
1187                         break;
1188
1189                 ord = mb_find_order_for_block(e4b, next);
1190
1191                 order = ord;
1192                 block = next >> order;
1193                 ex->fe_len += 1 << order;
1194         }
1195
1196         BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1197         return ex->fe_len;
1198 }
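/*
 * Example (illustrative numbers): a request with needed = 8 starting at a
 * free block inside an order-2 free chunk first yields an extent of up to
 * 4 blocks from the requested block to the chunk's end; the loop above
 * then keeps appending the adjacent free chunk on the right until
 * fe_len >= needed or the next block is found to be in use.
 */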
1199
1200 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1201 {
1202         int ord;
1203         int mlen = 0;
1204         int max = 0;
1205         int cur;
1206         int start = ex->fe_start;
1207         int len = ex->fe_len;
1208         unsigned ret = 0;
1209         int len0 = len;
1210         void *buddy;
1211
1212         BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1213         BUG_ON(e4b->bd_group != ex->fe_group);
1214         BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1215         mb_check_buddy(e4b);
1216         mb_mark_used_double(e4b, start, len);
1217
1218         e4b->bd_info->bb_free -= len;
1219         if (e4b->bd_info->bb_first_free == start)
1220                 e4b->bd_info->bb_first_free += len;
1221
1222         /* let's maintain fragments counter */
1223         if (start != 0)
1224                 mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
1225         if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1226                 max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
1227         if (mlen && max)
1228                 e4b->bd_info->bb_fragments++;
1229         else if (!mlen && !max)
1230                 e4b->bd_info->bb_fragments--;
1231
1232         /* let's maintain buddy itself */
1233         while (len) {
1234                 ord = mb_find_order_for_block(e4b, start);
1235
1236                 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1237                         /* the whole chunk may be allocated at once! */
1238                         mlen = 1 << ord;
1239                         buddy = mb_find_buddy(e4b, ord, &max);
1240                         BUG_ON((start >> ord) >= max);
1241                         mb_set_bit(start >> ord, buddy);
1242                         e4b->bd_info->bb_counters[ord]--;
1243                         start += mlen;
1244                         len -= mlen;
1245                         BUG_ON(len < 0);
1246                         continue;
1247                 }
1248
1249                 /* store for history */
1250                 if (ret == 0)
1251                         ret = len | (ord << 16);
1252
1253                 /* we have to split large buddy */
1254                 BUG_ON(ord <= 0);
1255                 buddy = mb_find_buddy(e4b, ord, &max);
1256                 mb_set_bit(start >> ord, buddy);
1257                 e4b->bd_info->bb_counters[ord]--;
1258
1259                 ord--;
1260                 cur = (start >> ord) & ~1U;
1261                 buddy = mb_find_buddy(e4b, ord, &max);
1262                 mb_clear_bit(cur, buddy);
1263                 mb_clear_bit(cur + 1, buddy);
1264                 e4b->bd_info->bb_counters[ord]++;
1265                 e4b->bd_info->bb_counters[ord]++;
1266         }
1267
1268         mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
1269                         EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
1270         mb_check_buddy(e4b);
1271
1272         return ret;
1273 }
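/*
 * The return value of mb_mark_used() packs two history values: the low 16
 * bits hold the length that remained when the first buddy split was
 * needed and the high 16 bits hold the order that was split; see
 * ext4_mb_use_best_found() below, which unpacks them into ac_tail and
 * ac_buddy.
 */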
1274
1275 /*
1276  * Must be called under group lock!
1277  */
1278 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1279                                         struct ext4_buddy *e4b)
1280 {
1281         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1282         int ret;
1283
1284         BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1285         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1286
1287         ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1288         ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1289         ret = mb_mark_used(e4b, &ac->ac_b_ex);
1290
1291         /* preallocation can change ac_b_ex, thus we store actually
1292          * allocated blocks for history */
1293         ac->ac_f_ex = ac->ac_b_ex;
1294
1295         ac->ac_status = AC_STATUS_FOUND;
1296         ac->ac_tail = ret & 0xffff;
1297         ac->ac_buddy = ret >> 16;
1298
1299         /* XXXXXXX: SUCH A HORRIBLE **CK */
1300         /*FIXME!! Why ? */
1301         ac->ac_bitmap_page = e4b->bd_bitmap_page;
1302         get_page(ac->ac_bitmap_page);
1303         ac->ac_buddy_page = e4b->bd_buddy_page;
1304         get_page(ac->ac_buddy_page);
1305
1306         /* store last allocated for subsequent stream allocation */
1307         if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
1308                 spin_lock(&sbi->s_md_lock);
1309                 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1310                 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1311                 spin_unlock(&sbi->s_md_lock);
1312         }
1313 }
1314
1315 /*
1316  * regular allocator, for general purposes allocation
1317  */
1318
1319 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1320                                         struct ext4_buddy *e4b,
1321                                         int finish_group)
1322 {
1323         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1324         struct ext4_free_extent *bex = &ac->ac_b_ex;
1325         struct ext4_free_extent *gex = &ac->ac_g_ex;
1326         struct ext4_free_extent ex;
1327         int max;
1328
1329         if (ac->ac_status == AC_STATUS_FOUND)
1330                 return;
1331         /*
1332          * We don't want to scan for a whole year
1333          */
1334         if (ac->ac_found > sbi->s_mb_max_to_scan &&
1335                         !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1336                 ac->ac_status = AC_STATUS_BREAK;
1337                 return;
1338         }
1339
1340         /*
1341          * Haven't found a good chunk so far, let's continue
1342          */
1343         if (bex->fe_len < gex->fe_len)
1344                 return;
1345
1346         if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1347                         && bex->fe_group == e4b->bd_group) {
1348                 /* recheck chunk's availability - we don't know
1349                  * when it was found (within this lock-unlock
1350                  * period or not) */
1351                 max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1352                 if (max >= gex->fe_len) {
1353                         ext4_mb_use_best_found(ac, e4b);
1354                         return;
1355                 }
1356         }
1357 }
1358
1359 /*
1360  * The routine checks whether the found extent is good enough. If it is,
1361  * then the extent gets marked used and a flag is set in the context
1362  * to stop scanning. Otherwise, the extent is compared with the
1363  * previously found extent and, if the new one is better, it's stored
1364  * in the context. Later, the best found extent will be used, if
1365  * mballoc can't find a good enough extent.
1366  *
1367  * FIXME: real allocation policy is to be designed yet!
1368  */
1369 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1370                                         struct ext4_free_extent *ex,
1371                                         struct ext4_buddy *e4b)
1372 {
1373         struct ext4_free_extent *bex = &ac->ac_b_ex;
1374         struct ext4_free_extent *gex = &ac->ac_g_ex;
1375
1376         BUG_ON(ex->fe_len <= 0);
1377         BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1378         BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1379         BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1380
1381         ac->ac_found++;
1382
1383         /*
1384          * The special case - take what you catch first
1385          */
1386         if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1387                 *bex = *ex;
1388                 ext4_mb_use_best_found(ac, e4b);
1389                 return;
1390         }
1391
1392         /*
1393          * Let's check whether the chunk is good enough
1394          */
1395         if (ex->fe_len == gex->fe_len) {
1396                 *bex = *ex;
1397                 ext4_mb_use_best_found(ac, e4b);
1398                 return;
1399         }
1400
1401         /*
1402          * If this is the first found extent, just store it in the context
1403          */
1404         if (bex->fe_len == 0) {
1405                 *bex = *ex;
1406                 return;
1407         }
1408
1409         /*
1410          * If the newly found extent is better, store it in the context
1411          */
1412         if (bex->fe_len < gex->fe_len) {
1413                 /* if the request isn't satisfied, any found extent
1414                  * larger than the previous best one is better */
1415                 if (ex->fe_len > bex->fe_len)
1416                         *bex = *ex;
1417         } else if (ex->fe_len > gex->fe_len) {
1418                 /* if the request is satisfied, then we try to find
1419                  * an extent that still satisfies the request, but is
1420                  * smaller than the previous one */
1421                 if (ex->fe_len < bex->fe_len)
1422                         *bex = *ex;
1423         }
1424
1425         ext4_mb_check_limits(ac, e4b, 0);
1426 }
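
/*
 * The selection policy above reduces to a single comparison.  A
 * minimal userspace sketch (illustrative only, not mballoc code) of
 * "does candidate length c beat the current best length b for goal
 * length g":
 *
 *      static int better(int b, int c, int g)
 *      {
 *              if (b == 0)
 *                      return 1;               // first extent found
 *              if (b < g)
 *                      return c > b;           // goal unmet: larger wins
 *              return c >= g && c < b;         // goal met: tightest fit wins
 *      }
 *
 * E.g. with g = 8, measuring lengths 5, 7, 12, 9 in turn leaves
 * best = 9: 5 and 7 never satisfy the goal, 12 does, and 9 is a
 * tighter fit than 12.
 */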
1427
1428 static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1429                                         struct ext4_buddy *e4b)
1430 {
1431         struct ext4_free_extent ex = ac->ac_b_ex;
1432         ext4_group_t group = ex.fe_group;
1433         int max;
1434         int err;
1435
1436         BUG_ON(ex.fe_len <= 0);
1437         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1438         if (err)
1439                 return err;
1440
1441         ext4_lock_group(ac->ac_sb, group);
1442         max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1443
1444         if (max > 0) {
1445                 ac->ac_b_ex = ex;
1446                 ext4_mb_use_best_found(ac, e4b);
1447         }
1448
1449         ext4_unlock_group(ac->ac_sb, group);
1450         ext4_mb_release_desc(e4b);
1451
1452         return 0;
1453 }
1454
1455 static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1456                                 struct ext4_buddy *e4b)
1457 {
1458         ext4_group_t group = ac->ac_g_ex.fe_group;
1459         int max;
1460         int err;
1461         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1462         struct ext4_super_block *es = sbi->s_es;
1463         struct ext4_free_extent ex;
1464
1465         if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1466                 return 0;
1467
1468         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1469         if (err)
1470                 return err;
1471
1472         ext4_lock_group(ac->ac_sb, group);
1473         max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1474                              ac->ac_g_ex.fe_len, &ex);
1475
1476         if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1477                 ext4_fsblk_t start;
1478
1479                 start = ((ext4_fsblk_t)e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
1480                         ex.fe_start + le32_to_cpu(es->s_first_data_block);
1481                 /* use do_div to get remainder (would be 64-bit modulo) */
1482                 if (do_div(start, sbi->s_stripe) == 0) {
1483                         ac->ac_found++;
1484                         ac->ac_b_ex = ex;
1485                         ext4_mb_use_best_found(ac, e4b);
1486                 }
1487         } else if (max >= ac->ac_g_ex.fe_len) {
1488                 BUG_ON(ex.fe_len <= 0);
1489                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1490                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1491                 ac->ac_found++;
1492                 ac->ac_b_ex = ex;
1493                 ext4_mb_use_best_found(ac, e4b);
1494         } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1495                 /* Sometimes, the caller may want to merge even a small
1496                  * number of blocks into an existing extent */
1497                 BUG_ON(ex.fe_len <= 0);
1498                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1499                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1500                 ac->ac_found++;
1501                 ac->ac_b_ex = ex;
1502                 ext4_mb_use_best_found(ac, e4b);
1503         }
1504         ext4_unlock_group(ac->ac_sb, group);
1505         ext4_mb_release_desc(e4b);
1506
1507         return 0;
1508 }
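
/*
 * Note that do_div(start, sbi->s_stripe) above divides start in place
 * and returns the remainder, so the test reads "the physical start of
 * the found extent is a multiple of the stripe size".  A plain-C
 * equivalent of the alignment test (variable names here are
 * illustrative assumptions):
 *
 *      unsigned long long start = (unsigned long long)group *
 *                      blocks_per_group + fe_start + first_data_block;
 *      int stripe_aligned = (start % stripe) == 0;
 */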
1509
1510 /*
1511  * The routine scans buddy structures (not the bitmap!) from the given
1512  * order up to the max order, looking for a big enough chunk to satisfy the request
1513  */
1514 static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1515                                         struct ext4_buddy *e4b)
1516 {
1517         struct super_block *sb = ac->ac_sb;
1518         struct ext4_group_info *grp = e4b->bd_info;
1519         void *buddy;
1520         int i;
1521         int k;
1522         int max;
1523
1524         BUG_ON(ac->ac_2order <= 0);
1525         for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1526                 if (grp->bb_counters[i] == 0)
1527                         continue;
1528
1529                 buddy = mb_find_buddy(e4b, i, &max);
1530                 BUG_ON(buddy == NULL);
1531
1532                 k = mb_find_next_zero_bit(buddy, max, 0);
1533                 BUG_ON(k >= max);
1534
1535                 ac->ac_found++;
1536
1537                 ac->ac_b_ex.fe_len = 1 << i;
1538                 ac->ac_b_ex.fe_start = k << i;
1539                 ac->ac_b_ex.fe_group = e4b->bd_group;
1540
1541                 ext4_mb_use_best_found(ac, e4b);
1542
1543                 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1544
1545                 if (EXT4_SB(sb)->s_mb_stats)
1546                         atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1547
1548                 break;
1549         }
1550 }
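
/*
 * In the order-i buddy bitmap a zero bit k marks a free, naturally
 * aligned chunk of 2^i blocks, so finding one zero bit yields the
 * whole allocation.  The index arithmetic, with assumed values:
 *
 *      int i = 4, k = 3;               // order-4 chunk, fourth slot
 *      int fe_start = k << i;          // = 48, group-relative start
 *      int fe_len   = 1 << i;          // = 16 blocks
 */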
1551
1552 /*
1553  * The routine scans the group and measures all found extents.
1554  * In order to optimize scanning, the number of free blocks in
1555  * the group (bb_free) is used as an upper limit.
1556  */
1557 static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1558                                         struct ext4_buddy *e4b)
1559 {
1560         struct super_block *sb = ac->ac_sb;
1561         void *bitmap = EXT4_MB_BITMAP(e4b);
1562         struct ext4_free_extent ex;
1563         int i;
1564         int free;
1565
1566         free = e4b->bd_info->bb_free;
1567         BUG_ON(free <= 0);
1568
1569         i = e4b->bd_info->bb_first_free;
1570
1571         while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1572                 i = mb_find_next_zero_bit(bitmap,
1573                                                 EXT4_BLOCKS_PER_GROUP(sb), i);
1574                 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1575                         /*
1576                          * If we have a corrupt bitmap, we won't find any
1577                          * free blocks even though the group info says
1578                          * we have free blocks
1579                          */
1580                         ext4_error(sb, __func__, "%d free blocks as per "
1581                                         "group info. But bitmap says 0",
1582                                         free);
1583                         break;
1584                 }
1585
1586                 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1587                 BUG_ON(ex.fe_len <= 0);
1588                 if (free < ex.fe_len) {
1589                         ext4_error(sb, __func__, "%d free blocks as per "
1590                                         "group info. But got %d blocks",
1591                                         free, ex.fe_len);
1592                         /*
1593                          * The number of free blocks differs. This mostly
1594                          * indicates that the bitmap is corrupt. So exit
1595                          * without claiming the space.
1596                          */
1597                         break;
1598                 }
1599
1600                 ext4_mb_measure_extent(ac, &ex, e4b);
1601
1602                 i += ex.fe_len;
1603                 free -= ex.fe_len;
1604         }
1605
1606         ext4_mb_check_limits(ac, e4b, 1);
1607 }
1608
1609 /*
1610  * This is a special case for storage like RAID5:
1611  * we try to find stripe-aligned chunks for stripe-sized requests.
1612  * XXX: should do so at least for multiples of the stripe size as well
1613  */
1614 static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1615                                  struct ext4_buddy *e4b)
1616 {
1617         struct super_block *sb = ac->ac_sb;
1618         struct ext4_sb_info *sbi = EXT4_SB(sb);
1619         void *bitmap = EXT4_MB_BITMAP(e4b);
1620         struct ext4_free_extent ex;
1621         ext4_fsblk_t first_group_block;
1622         ext4_fsblk_t a;
1623         ext4_grpblk_t i;
1624         int max;
1625
1626         BUG_ON(sbi->s_stripe == 0);
1627
1628         /* find first stripe-aligned block in group */
1629         first_group_block = (ext4_fsblk_t)e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
1630                 + le32_to_cpu(sbi->s_es->s_first_data_block);
1631         a = first_group_block + sbi->s_stripe - 1;
1632         do_div(a, sbi->s_stripe);
1633         i = (a * sbi->s_stripe) - first_group_block;
1634
1635         while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1636                 if (!mb_test_bit(i, bitmap)) {
1637                         max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1638                         if (max >= sbi->s_stripe) {
1639                                 ac->ac_found++;
1640                                 ac->ac_b_ex = ex;
1641                                 ext4_mb_use_best_found(ac, e4b);
1642                                 break;
1643                         }
1644                 }
1645                 i += sbi->s_stripe;
1646         }
1647 }
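
/*
 * The round-up above is the usual "first multiple of stripe >=
 * first_group_block" idiom.  A worked example (the numbers are
 * assumptions):
 *
 *      first_group_block = 32770, stripe = 16
 *      a = (32770 + 16 - 1) / 16 = 2049    // do_div leaves the quotient in a
 *      i = 2049 * 16 - 32770 = 14          // first aligned offset in group
 *
 * The loop then probes group-relative offsets 14, 30, 46, ...
 */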
1648
1649 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1650                                 ext4_group_t group, int cr)
1651 {
1652         unsigned free, fragments;
1653         unsigned i, bits;
1654         struct ext4_group_desc *desc;
1655         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1656
1657         BUG_ON(cr < 0 || cr >= 4);
1658         BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
1659
1660         free = grp->bb_free;
1661         fragments = grp->bb_fragments;
1662         if (free == 0)
1663                 return 0;
1664         if (fragments == 0)
1665                 return 0;
1666
1667         switch (cr) {
1668         case 0:
1669                 BUG_ON(ac->ac_2order == 0);
1670                 /* If this group is uninitialized, skip it initially */
1671                 desc = ext4_get_group_desc(ac->ac_sb, group, NULL);
1672                 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1673                         return 0;
1674
1675                 bits = ac->ac_sb->s_blocksize_bits + 1;
1676                 for (i = ac->ac_2order; i <= bits; i++)
1677                         if (grp->bb_counters[i] > 0)
1678                                 return 1;
1679                 break;
1680         case 1:
1681                 if ((free / fragments) >= ac->ac_g_ex.fe_len)
1682                         return 1;
1683                 break;
1684         case 2:
1685                 if (free >= ac->ac_g_ex.fe_len)
1686                         return 1;
1687                 break;
1688         case 3:
1689                 return 1;
1690         default:
1691                 BUG();
1692         }
1693
1694         return 0;
1695 }
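
/*
 * The heuristic above can be restated as a standalone predicate.  A
 * userspace sketch (the counters[] array and order bounds are assumed
 * parameters, and the cr == 0 BLOCK_UNINIT special case is omitted):
 *
 *      static int good_group(int cr, unsigned free, unsigned frags,
 *                            unsigned goal, const unsigned *counters,
 *                            int min_order, int max_order)
 *      {
 *              int i;
 *
 *              if (!free || !frags)
 *                      return 0;
 *              switch (cr) {
 *              case 0:         // is an exact buddy chunk available?
 *                      for (i = min_order; i <= max_order; i++)
 *                              if (counters[i])
 *                                      return 1;
 *                      return 0;
 *              case 1:         // is the average fragment big enough?
 *                      return free / frags >= goal;
 *              case 2:         // are there enough free blocks at all?
 *                      return free >= goal;
 *              default:        // cr == 3: anything goes
 *                      return 1;
 *              }
 *      }
 */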
1696
1697 static noinline_for_stack int
1698 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1699 {
1700         ext4_group_t group;
1701         ext4_group_t i;
1702         int cr;
1703         int err = 0;
1704         int bsbits;
1705         struct ext4_sb_info *sbi;
1706         struct super_block *sb;
1707         struct ext4_buddy e4b;
1708         loff_t size, isize;
1709
1710         sb = ac->ac_sb;
1711         sbi = EXT4_SB(sb);
1712         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1713
1714         /* first, try the goal */
1715         err = ext4_mb_find_by_goal(ac, &e4b);
1716         if (err || ac->ac_status == AC_STATUS_FOUND)
1717                 goto out;
1718
1719         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1720                 goto out;
1721
1722         /*
1723          * ac->ac_2order is set only if the fe_len is a power of 2;
1724          * if ac_2order is set we also set the criteria to 0 so that we
1725          * try exact allocation using the buddy data.
1726          */
1727         i = fls(ac->ac_g_ex.fe_len);
1728         ac->ac_2order = 0;
1729         /*
1730          * We search using buddy data only if the order of the request
1731          * is greater than or equal to sbi->s_mb_order2_reqs.
1732          * You can tune it via /proc/fs/ext4/<partition>/order2_req
1733          */
1734         if (i >= sbi->s_mb_order2_reqs) {
1735                 /*
1736                  * This should tell if fe_len is exactly a power of 2
1737                  */
1738                 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1739                         ac->ac_2order = i - 1;
1740         }
1741
1742         bsbits = ac->ac_sb->s_blocksize_bits;
1743         /* if stream allocation is enabled, use global goal */
1744         size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
1745         isize = i_size_read(ac->ac_inode) >> bsbits;
1746         if (size < isize)
1747                 size = isize;
1748
1749         if (size < sbi->s_mb_stream_request &&
1750                         (ac->ac_flags & EXT4_MB_HINT_DATA)) {
1751                 /* TBD: this may be a hot spot */
1752                 spin_lock(&sbi->s_md_lock);
1753                 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1754                 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1755                 spin_unlock(&sbi->s_md_lock);
1756         }
1757         /* Let's just scan groups to find more or less suitable blocks */
1758         cr = ac->ac_2order ? 0 : 1;
1759         /*
1760          * cr == 0 try to get exact allocation,
1761          * cr == 3 try to get anything
1762          */
1763 repeat:
1764         for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1765                 ac->ac_criteria = cr;
1766                 /*
1767                  * search for the right group starting
1768                  * from the goal value specified
1769                  */
1770                 group = ac->ac_g_ex.fe_group;
1771
1772                 for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
1773                         struct ext4_group_info *grp;
1774                         struct ext4_group_desc *desc;
1775
1776                         if (group == EXT4_SB(sb)->s_groups_count)
1777                                 group = 0;
1778
1779                         /* quick check to skip empty groups */
1780                         grp = ext4_get_group_info(ac->ac_sb, group);
1781                         if (grp->bb_free == 0)
1782                                 continue;
1783
1784                         /*
1785                          * if the group is already initialized, we check whether it is
1786                          * a good group; if not, we don't load the buddy
1787                          */
1788                         if (EXT4_MB_GRP_NEED_INIT(grp)) {
1789                                 /*
1790                                  * we need full data about the group
1791                                  * to make a good selection
1792                                  */
1793                                 err = ext4_mb_load_buddy(sb, group, &e4b);
1794                                 if (err)
1795                                         goto out;
1796                                 ext4_mb_release_desc(&e4b);
1797                         }
1798
1799                         /*
1800                          * If the particular group doesn't satisfy our
1801                          * criteria, we continue with the next group
1802                          */
1803                         if (!ext4_mb_good_group(ac, group, cr))
1804                                 continue;
1805
1806                         err = ext4_mb_load_buddy(sb, group, &e4b);
1807                         if (err)
1808                                 goto out;
1809
1810                         ext4_lock_group(sb, group);
1811                         if (!ext4_mb_good_group(ac, group, cr)) {
1812                                 /* someone did allocation from this group */
1813                                 ext4_unlock_group(sb, group);
1814                                 ext4_mb_release_desc(&e4b);
1815                                 continue;
1816                         }
1817
1818                         ac->ac_groups_scanned++;
1819                         desc = ext4_get_group_desc(sb, group, NULL);
1820                         if (cr == 0 || (desc->bg_flags &
1821                                         cpu_to_le16(EXT4_BG_BLOCK_UNINIT) &&
1822                                         ac->ac_2order != 0))
1823                                 ext4_mb_simple_scan_group(ac, &e4b);
1824                         else if (cr == 1 &&
1825                                         ac->ac_g_ex.fe_len == sbi->s_stripe)
1826                                 ext4_mb_scan_aligned(ac, &e4b);
1827                         else
1828                                 ext4_mb_complex_scan_group(ac, &e4b);
1829
1830                         ext4_unlock_group(sb, group);
1831                         ext4_mb_release_desc(&e4b);
1832
1833                         if (ac->ac_status != AC_STATUS_CONTINUE)
1834                                 break;
1835                 }
1836         }
1837
1838         if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
1839             !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1840                 /*
1841                  * We've been searching too long. Let's try to allocate
1842                  * the best chunk we've found so far
1843                  */
1844
1845                 ext4_mb_try_best_found(ac, &e4b);
1846                 if (ac->ac_status != AC_STATUS_FOUND) {
1847                         /*
1848                          * Someone luckier has already allocated it.
1849                          * The only thing we can do is just take the first
1850                          * found block(s)
1851                         printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
1852                          */
1853                         ac->ac_b_ex.fe_group = 0;
1854                         ac->ac_b_ex.fe_start = 0;
1855                         ac->ac_b_ex.fe_len = 0;
1856                         ac->ac_status = AC_STATUS_CONTINUE;
1857                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
1858                         cr = 3;
1859                         atomic_inc(&sbi->s_mb_lost_chunks);
1860                         goto repeat;
1861                 }
1862         }
1863 out:
1864         return err;
1865 }
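
/*
 * The group walk above starts at the goal group and wraps around once
 * per criteria pass.  The wrap idiom in isolation (ngroups stands in
 * for s_groups_count):
 *
 *      group = goal_group;
 *      for (i = 0; i < ngroups; group++, i++) {
 *              if (group == ngroups)
 *                      group = 0;
 *              // ... examine "group" under the current cr ...
 *      }
 *
 * so exactly ngroups groups are visited no matter where the goal
 * group lies.
 */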
1866
1867 #ifdef EXT4_MB_HISTORY
1868 struct ext4_mb_proc_session {
1869         struct ext4_mb_history *history;
1870         struct super_block *sb;
1871         int start;
1872         int max;
1873 };
1874
1875 static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
1876                                         struct ext4_mb_history *hs,
1877                                         int first)
1878 {
1879         if (hs == s->history + s->max)
1880                 hs = s->history;
1881         if (!first && hs == s->history + s->start)
1882                 return NULL;
1883         while (hs->orig.fe_len == 0) {
1884                 hs++;
1885                 if (hs == s->history + s->max)
1886                         hs = s->history;
1887                 if (hs == s->history + s->start)
1888                         return NULL;
1889         }
1890         return hs;
1891 }
1892
1893 static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
1894 {
1895         struct ext4_mb_proc_session *s = seq->private;
1896         struct ext4_mb_history *hs;
1897         int l = *pos;
1898
1899         if (l == 0)
1900                 return SEQ_START_TOKEN;
1901         hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1902         if (!hs)
1903                 return NULL;
1904         while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
1905         return hs;
1906 }
1907
1908 static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
1909                                       loff_t *pos)
1910 {
1911         struct ext4_mb_proc_session *s = seq->private;
1912         struct ext4_mb_history *hs = v;
1913
1914         ++*pos;
1915         if (v == SEQ_START_TOKEN)
1916                 return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1917         else
1918                 return ext4_mb_history_skip_empty(s, ++hs, 0);
1919 }
1920
1921 static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
1922 {
1923         char buf[25], buf2[25], buf3[25], *fmt;
1924         struct ext4_mb_history *hs = v;
1925
1926         if (v == SEQ_START_TOKEN) {
1927                 seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
1928                                 "%-5s %-2s %-5s %-5s %-5s %-6s\n",
1929                           "pid", "inode", "original", "goal", "result", "found",
1930                            "grps", "cr", "flags", "merge", "tail", "broken");
1931                 return 0;
1932         }
1933
1934         if (hs->op == EXT4_MB_HISTORY_ALLOC) {
1935                 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
1936                         "%-5u %-5s %-5u %-6u\n";
1937                 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
1938                         hs->result.fe_start, hs->result.fe_len,
1939                         hs->result.fe_logical);
1940                 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
1941                         hs->orig.fe_start, hs->orig.fe_len,
1942                         hs->orig.fe_logical);
1943                 sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
1944                         hs->goal.fe_start, hs->goal.fe_len,
1945                         hs->goal.fe_logical);
1946                 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
1947                                 hs->found, hs->groups, hs->cr, hs->flags,
1948                                 hs->merged ? "M" : "", hs->tail,
1949                                 hs->buddy ? 1 << hs->buddy : 0);
1950         } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
1951                 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
1952                 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
1953                         hs->result.fe_start, hs->result.fe_len,
1954                         hs->result.fe_logical);
1955                 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
1956                         hs->orig.fe_start, hs->orig.fe_len,
1957                         hs->orig.fe_logical);
1958                 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
1959         } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
1960                 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
1961                         hs->result.fe_start, hs->result.fe_len);
1962                 seq_printf(seq, "%-5u %-8u %-23s discard\n",
1963                                 hs->pid, hs->ino, buf2);
1964         } else if (hs->op == EXT4_MB_HISTORY_FREE) {
1965                 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
1966                         hs->result.fe_start, hs->result.fe_len);
1967                 seq_printf(seq, "%-5u %-8u %-23s free\n",
1968                                 hs->pid, hs->ino, buf2);
1969         }
1970         return 0;
1971 }
1972
1973 static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
1974 {
1975 }
1976
1977 static struct seq_operations ext4_mb_seq_history_ops = {
1978         .start  = ext4_mb_seq_history_start,
1979         .next   = ext4_mb_seq_history_next,
1980         .stop   = ext4_mb_seq_history_stop,
1981         .show   = ext4_mb_seq_history_show,
1982 };
1983
1984 static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
1985 {
1986         struct super_block *sb = PDE(inode)->data;
1987         struct ext4_sb_info *sbi = EXT4_SB(sb);
1988         struct ext4_mb_proc_session *s;
1989         int rc;
1990         int size;
1991
1992         if (unlikely(sbi->s_mb_history == NULL))
1993                 return -ENOMEM;
1994         s = kmalloc(sizeof(*s), GFP_KERNEL);
1995         if (s == NULL)
1996                 return -ENOMEM;
1997         s->sb = sb;
1998         size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
1999         s->history = kmalloc(size, GFP_KERNEL);
2000         if (s->history == NULL) {
2001                 kfree(s);
2002                 return -ENOMEM;
2003         }
2004
2005         spin_lock(&sbi->s_mb_history_lock);
2006         memcpy(s->history, sbi->s_mb_history, size);
2007         s->max = sbi->s_mb_history_max;
2008         s->start = sbi->s_mb_history_cur % s->max;
2009         spin_unlock(&sbi->s_mb_history_lock);
2010
2011         rc = seq_open(file, &ext4_mb_seq_history_ops);
2012         if (rc == 0) {
2013                 struct seq_file *m = (struct seq_file *)file->private_data;
2014                 m->private = s;
2015         } else {
2016                 kfree(s->history);
2017                 kfree(s);
2018         }
2019         return rc;
2020
2021 }
2022
2023 static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2024 {
2025         struct seq_file *seq = (struct seq_file *)file->private_data;
2026         struct ext4_mb_proc_session *s = seq->private;
2027         kfree(s->history);
2028         kfree(s);
2029         return seq_release(inode, file);
2030 }
2031
2032 static ssize_t ext4_mb_seq_history_write(struct file *file,
2033                                 const char __user *buffer,
2034                                 size_t count, loff_t *ppos)
2035 {
2036         struct seq_file *seq = (struct seq_file *)file->private_data;
2037         struct ext4_mb_proc_session *s = seq->private;
2038         struct super_block *sb = s->sb;
2039         char str[32];
2040         int value;
2041
2042         if (count >= sizeof(str)) {
2043                 printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2044                                 "mb_history", (int)sizeof(str));
2045                 return -EOVERFLOW;
2046         }
2047
2048         if (copy_from_user(str, buffer, count))
2049                 return -EFAULT;
2050
2051         value = simple_strtol(str, NULL, 0);
2052         if (value < 0)
2053                 return -ERANGE;
2054         EXT4_SB(sb)->s_mb_history_filter = value;
2055
2056         return count;
2057 }
2058
2059 static struct file_operations ext4_mb_seq_history_fops = {
2060         .owner          = THIS_MODULE,
2061         .open           = ext4_mb_seq_history_open,
2062         .read           = seq_read,
2063         .write          = ext4_mb_seq_history_write,
2064         .llseek         = seq_lseek,
2065         .release        = ext4_mb_seq_history_release,
2066 };
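
/*
 * The ops above follow the standard seq_file contract - start()
 * positions the iterator at *pos, next() advances it, show() prints
 * one record, stop() cleans up - with SEQ_START_TOKEN standing in for
 * the header row.  A minimal iterator over an array a[0..n-1] (a and
 * n are assumed names) would look like:
 *
 *      static void *a_start(struct seq_file *m, loff_t *pos)
 *      {
 *              return *pos < n ? &a[*pos] : NULL;
 *      }
 *
 *      static void *a_next(struct seq_file *m, void *v, loff_t *pos)
 *      {
 *              return ++*pos < n ? &a[*pos] : NULL;
 *      }
 */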
2067
2068 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2069 {
2070         struct super_block *sb = seq->private;
2071         struct ext4_sb_info *sbi = EXT4_SB(sb);
2072         ext4_group_t group;
2073
2074         if (*pos < 0 || *pos >= sbi->s_groups_count)
2075                 return NULL;
2076
2077         group = *pos + 1;
2078         return (void *) ((unsigned long) group);
2079 }
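
/*
 * The +1 here (and the matching group-- in the show routine) keeps a
 * valid group 0 from being encoded as a NULL iterator value, which
 * seq_file would take as end-of-sequence.
 */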
2080
2081 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2082 {
2083         struct super_block *sb = seq->private;
2084         struct ext4_sb_info *sbi = EXT4_SB(sb);
2085         ext4_group_t group;
2086
2087         ++*pos;
2088         if (*pos < 0 || *pos >= sbi->s_groups_count)
2089                 return NULL;
2090         group = *pos + 1;
2091         return (void *) ((unsigned long) group);
2092 }
2093
2094 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2095 {
2096         struct super_block *sb = seq->private;
2097         ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2098         int i;
2099         int err;
2100         struct ext4_buddy e4b;
2101         struct sg {
2102                 struct ext4_group_info info;
2103                 unsigned short counters[16];
2104         } sg;
2105
2106         group--;
2107         if (group == 0)
2108                 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2109                                 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2110                                   "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2111                            "group", "free", "frags", "first",
2112                            "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2113                            "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2114
2115         i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2116                 sizeof(struct ext4_group_info);
2117         err = ext4_mb_load_buddy(sb, group, &e4b);
2118         if (err) {
2119                 seq_printf(seq, "#%-5u: I/O error\n", group);
2120                 return 0;
2121         }
2122         ext4_lock_group(sb, group);
2123         memcpy(&sg, ext4_get_group_info(sb, group), i);
2124         ext4_unlock_group(sb, group);
2125         ext4_mb_release_desc(&e4b);
2126
2127         seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2128                         sg.info.bb_fragments, sg.info.bb_first_free);
2129         for (i = 0; i <= 13; i++)
2130                 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2131                                 sg.info.bb_counters[i] : 0);
2132         seq_printf(seq, " ]\n");
2133
2134         return 0;
2135 }
2136
2137 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2138 {
2139 }
2140
2141 static struct seq_operations ext4_mb_seq_groups_ops = {
2142         .start  = ext4_mb_seq_groups_start,
2143         .next   = ext4_mb_seq_groups_next,
2144         .stop   = ext4_mb_seq_groups_stop,
2145         .show   = ext4_mb_seq_groups_show,
2146 };
2147
2148 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2149 {
2150         struct super_block *sb = PDE(inode)->data;
2151         int rc;
2152
2153         rc = seq_open(file, &ext4_mb_seq_groups_ops);
2154         if (rc == 0) {
2155                 struct seq_file *m = (struct seq_file *)file->private_data;
2156                 m->private = sb;
2157         }
2158         return rc;
2159
2160 }
2161
2162 static struct file_operations ext4_mb_seq_groups_fops = {
2163         .owner          = THIS_MODULE,
2164         .open           = ext4_mb_seq_groups_open,
2165         .read           = seq_read,
2166         .llseek         = seq_lseek,
2167         .release        = seq_release,
2168 };
2169
2170 static void ext4_mb_history_release(struct super_block *sb)
2171 {
2172         struct ext4_sb_info *sbi = EXT4_SB(sb);
2173
2174         if (sbi->s_proc != NULL) {
2175                 remove_proc_entry("mb_groups", sbi->s_proc);
2176                 remove_proc_entry("mb_history", sbi->s_proc);
2177         }
2178         kfree(sbi->s_mb_history);
2179 }
2180
2181 static void ext4_mb_history_init(struct super_block *sb)
2182 {
2183         struct ext4_sb_info *sbi = EXT4_SB(sb);
2184         int i;
2185
2186         if (sbi->s_proc != NULL) {
2187                 proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
2188                                  &ext4_mb_seq_history_fops, sb);
2189                 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2190                                  &ext4_mb_seq_groups_fops, sb);
2191         }
2192
2193         sbi->s_mb_history_max = 1000;
2194         sbi->s_mb_history_cur = 0;
2195         spin_lock_init(&sbi->s_mb_history_lock);
2196         i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2197         sbi->s_mb_history = kzalloc(i, GFP_KERNEL);
2198         /* if we can't allocate history, then we simply won't use it */
2199 }
2200
2201 static noinline_for_stack void
2202 ext4_mb_store_history(struct ext4_allocation_context *ac)
2203 {
2204         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2205         struct ext4_mb_history h;
2206
2207         if (unlikely(sbi->s_mb_history == NULL))
2208                 return;
2209
2210         if (!(ac->ac_op & sbi->s_mb_history_filter))
2211                 return;
2212
2213         h.op = ac->ac_op;
2214         h.pid = current->pid;
2215         h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2216         h.orig = ac->ac_o_ex;
2217         h.result = ac->ac_b_ex;
2218         h.flags = ac->ac_flags;
2219         h.found = ac->ac_found;
2220         h.groups = ac->ac_groups_scanned;
2221         h.cr = ac->ac_criteria;
2222         h.tail = ac->ac_tail;
2223         h.buddy = ac->ac_buddy;
2224         h.merged = 0;
2225         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2226                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2227                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2228                         h.merged = 1;
2229                 h.goal = ac->ac_g_ex;
2230                 h.result = ac->ac_f_ex;
2231         }
2232
2233         spin_lock(&sbi->s_mb_history_lock);
2234         memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2235         if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2236                 sbi->s_mb_history_cur = 0;
2237         spin_unlock(&sbi->s_mb_history_lock);
2238 }
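
/*
 * s_mb_history is a plain ring buffer; the store above is the classic
 * "write at cur, wrap at max" pattern under a spinlock, stripped to
 * its core (names mirror the fields above):
 *
 *      spin_lock(&lock);
 *      buf[cur] = h;
 *      if (++cur >= max)
 *              cur = 0;
 *      spin_unlock(&lock);
 *
 * Readers (the mb_history file above) copy the whole buffer under the
 * same lock and then walk the copy, so entries are never torn.
 */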
2239
2240 #else
2241 #define ext4_mb_history_release(sb)
2242 #define ext4_mb_history_init(sb)
2243 #endif
2244
2245
2246 /* Create and initialize ext4_group_info data for the given group. */
2247 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2248                           struct ext4_group_desc *desc)
2249 {
2250         int i, len;
2251         int metalen = 0;
2252         struct ext4_sb_info *sbi = EXT4_SB(sb);
2253         struct ext4_group_info **meta_group_info;
2254
2255         /*
2256          * First check if this group is the first one covered by a
2257          * group-descriptor block. If so, we have to allocate a new
2258          * table of pointers to ext4_group_info structures
2259          */
2260         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2261                 metalen = sizeof(*meta_group_info) <<
2262                         EXT4_DESC_PER_BLOCK_BITS(sb);
2263                 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2264                 if (meta_group_info == NULL) {
2265                         printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2266                                "buddy group\n");
2267                         goto exit_meta_group_info;
2268                 }
2269                 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2270                         meta_group_info;
2271         }
2272
2273         /*
2274          * calculate the needed size. if you change the bb_counters size,
2275          * don't forget about ext4_mb_generate_buddy()
2276          */
2277         len = offsetof(typeof(**meta_group_info),
2278                        bb_counters[sb->s_blocksize_bits + 2]);
2279
2280         meta_group_info =
2281                 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2282         i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2283
2284         meta_group_info[i] = kzalloc(len, GFP_KERNEL);
2285         if (meta_group_info[i] == NULL) {
2286                 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2287                 goto exit_group_info;
2288         }
2289         set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2290                 &(meta_group_info[i]->bb_state));
2291
2292         /*
2293          * initialize bb_free to be able to skip
2294          * empty groups without initialization
2295          */
2296         if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2297                 meta_group_info[i]->bb_free =
2298                         ext4_free_blocks_after_init(sb, group, desc);
2299         } else {
2300                 meta_group_info[i]->bb_free =
2301                         le16_to_cpu(desc->bg_free_blocks_count);
2302         }
2303
2304         INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2305         meta_group_info[i]->bb_free_root.rb_node = NULL;
2306
2307 #ifdef DOUBLE_CHECK
2308         {
2309                 struct buffer_head *bh;
2310                 meta_group_info[i]->bb_bitmap =
2311                         kmalloc(sb->s_blocksize, GFP_KERNEL);
2312                 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2313                 bh = ext4_read_block_bitmap(sb, group);
2314                 BUG_ON(bh == NULL);
2315                 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2316                         sb->s_blocksize);
2317                 put_bh(bh);
2318         }
2319 #endif
2320
2321         return 0;
2322
2323 exit_group_info:
2324         /* If a meta_group_info table has been allocated, release it now */
2325         if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2326                 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2327 exit_meta_group_info:
2328         return -ENOMEM;
2329 } /* ext4_mb_add_groupinfo */
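
/*
 * s_group_info is a two-level table, so a group number splits into an
 * outer index (which block of pointers) and an inner index (the slot
 * within it).  With EXT4_DESC_PER_BLOCK(sb) a power of two this is
 * just a shift and a mask:
 *
 *      outer = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
 *      inner = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
 *      grp   = sbi->s_group_info[outer][inner];
 *
 * e.g. assuming 128 descriptors per block, group 300 lives at
 * s_group_info[2][44].
 */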
2330
2331 /*
2332  * Add a group to the existing groups.
2333  * This function is used for online resize
2334  */
2335 int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
2336                                struct ext4_group_desc *desc)
2337 {
2338         struct ext4_sb_info *sbi = EXT4_SB(sb);
2339         struct inode *inode = sbi->s_buddy_cache;
2340         int blocks_per_page;
2341         int block;
2342         int pnum;
2343         struct page *page;
2344         int err;
2345
2346         /* Add group based on the group descriptor */
2347         err = ext4_mb_add_groupinfo(sb, group, desc);
2348         if (err)
2349                 return err;
2350
2351         /*
2352          * Cache pages containing dynamic mb_alloc data (buddy and bitmap
2353          * data) are marked not up to date so that they will be re-initialized
2354          * during the next call to ext4_mb_load_buddy
2355          */
2356
2357         /* Set buddy page as not up to date */
2358         blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
2359         block = group * 2;
2360         pnum = block / blocks_per_page;
2361         page = find_get_page(inode->i_mapping, pnum);
2362         if (page != NULL) {
2363                 ClearPageUptodate(page);
2364                 page_cache_release(page);
2365         }
2366
2367         /* Set bitmap page as not up to date */
2368         block++;
2369         pnum = block / blocks_per_page;
2370         page = find_get_page(inode->i_mapping, pnum);
2371         if (page != NULL) {
2372                 ClearPageUptodate(page);
2373                 page_cache_release(page);
2374         }
2375
2376         return 0;
2377 }
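
/*
 * Each group owns two consecutive logical blocks in the buddy-cache
 * inode - block 2 * group holds the buddy and block 2 * group + 1 the
 * bitmap - so the page to invalidate falls out of a plain division.
 * Worked examples (block and page sizes are assumptions):
 *
 *      4KB blocks, 4KB pages (blocks_per_page = 1), group 5:
 *              buddy block 10 -> page 10, bitmap block 11 -> page 11
 *      1KB blocks, 4KB pages (blocks_per_page = 4), group 5:
 *              both blocks 10 and 11 -> page 2
 */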
2378
2379 /*
2380  * Update an existing group.
2381  * This function is used for online resize
2382  */
2383 void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add)
2384 {
2385         grp->bb_free += add;
2386 }
2387
2388 static int ext4_mb_init_backend(struct super_block *sb)
2389 {
2390         ext4_group_t i;
2391         int metalen;
2392         struct ext4_sb_info *sbi = EXT4_SB(sb);
2393         struct ext4_super_block *es = sbi->s_es;
2394         int num_meta_group_infos;
2395         int num_meta_group_infos_max;
2396         int array_size;
2397         struct ext4_group_info **meta_group_info;
2398         struct ext4_group_desc *desc;
2399
2400         /* This is the number of blocks used by GDT */
2401         num_meta_group_infos = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) -
2402                                 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2403
2404         /*
2405          * This is the total number of blocks used by GDT including
2406          * the number of reserved blocks for GDT.
2407          * The s_group_info array is allocated with this value
2408          * to allow a clean online resize without a complex
2409          * manipulation of pointers.
2410          * The drawback is the unused memory when no resize
2411          * occurs, but it's very small in terms of pages
2412          * (see comments below)
2413          * Need to handle this properly when META_BG resizing is allowed
2414          */
2415         num_meta_group_infos_max = num_meta_group_infos +
2416                                 le16_to_cpu(es->s_reserved_gdt_blocks);
2417
2418         /*
2419          * array_size is the size of the s_group_info array. We round it
2420          * up to the next power of two because kmalloc rounds internally
2421          * anyway, so the extra memory here comes for free
2422          * (e.g. it may be used for a META_BG resize).
2423          */
2424         array_size = 1;
2425         while (array_size < sizeof(*sbi->s_group_info) *
2426                num_meta_group_infos_max)
2427                 array_size = array_size << 1;
2428         /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2429          * kmalloc. A 128KB kmalloc should suffice for a 256TB filesystem.
2430          * So a two level scheme suffices for now. */
2431         sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
2432         if (sbi->s_group_info == NULL) {
2433                 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2434                 return -ENOMEM;
2435         }
2436         sbi->s_buddy_cache = new_inode(sb);
2437         if (sbi->s_buddy_cache == NULL) {
2438                 printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2439                 goto err_freesgi;
2440         }
2441         EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2442
2443         metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2444         for (i = 0; i < num_meta_group_infos; i++) {
2445                 if ((i + 1) == num_meta_group_infos)
2446                         metalen = sizeof(*meta_group_info) *
2447                                 (sbi->s_groups_count -
2448                                         (i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2449                 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2450                 if (meta_group_info == NULL) {
2451                         printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2452                                "buddy group\n");
2453                         goto err_freemeta;
2454                 }
2455                 sbi->s_group_info[i] = meta_group_info;
2456         }
2457
2458         for (i = 0; i < sbi->s_groups_count; i++) {
2459                 desc = ext4_get_group_desc(sb, i, NULL);
2460                 if (desc == NULL) {
2461                         printk(KERN_ERR
2462                                 "EXT4-fs: can't read descriptor %u\n", i);
2463                         goto err_freebuddy;
2464                 }
2465                 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2466                         goto err_freebuddy;
2467         }
2468
2469         return 0;
2470
2471 err_freebuddy:
2472         while (i-- > 0)
2473                 kfree(ext4_get_group_info(sb, i));
2474         i = num_meta_group_infos;
2475 err_freemeta:
2476         while (i-- > 0)
2477                 kfree(sbi->s_group_info[i]);
2478         iput(sbi->s_buddy_cache);
2479 err_freesgi:
2480         kfree(sbi->s_group_info);
2481         return -ENOMEM;
2482 }
2483
2484 int ext4_mb_init(struct super_block *sb, int needs_recovery)
2485 {
2486         struct ext4_sb_info *sbi = EXT4_SB(sb);
2487         unsigned i, j;
2488         unsigned offset;
2489         unsigned max;
2490         int ret;
2491
2492         i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2493
2494         sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2495         if (sbi->s_mb_offsets == NULL) {
2496                 return -ENOMEM;
2497         }
2498
2499         i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
2500         sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2501         if (sbi->s_mb_maxs == NULL) {
2502                 kfree(sbi->s_mb_offsets);
2503                 return -ENOMEM;
2504         }
2505
2506         /* order 0 is regular bitmap */
2507         sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2508         sbi->s_mb_offsets[0] = 0;
2509
2510         i = 1;
2511         offset = 0;
2512         max = sb->s_blocksize << 2;
2513         do {
2514                 sbi->s_mb_offsets[i] = offset;
2515                 sbi->s_mb_maxs[i] = max;
2516                 offset += 1 << (sb->s_blocksize_bits - i);
2517                 max = max >> 1;
2518                 i++;
2519         } while (i <= sb->s_blocksize_bits + 1);
2520
2521         /* init file for buddy data */
2522         ret = ext4_mb_init_backend(sb);
2523         if (ret != 0) {
2524                 kfree(sbi->s_mb_offsets);
2525                 kfree(sbi->s_mb_maxs);
2526                 return ret;
2527         }
2528
2529         spin_lock_init(&sbi->s_md_lock);
2530         spin_lock_init(&sbi->s_bal_lock);
2531
2532         sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2533         sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2534         sbi->s_mb_stats = MB_DEFAULT_STATS;
2535         sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2536         sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2537         sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2538         sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2539
2540         sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2541         if (sbi->s_locality_groups == NULL) {
2542                 kfree(sbi->s_mb_offsets);
2543                 kfree(sbi->s_mb_maxs);
2544                 return -ENOMEM;
2545         }
2546         for_each_possible_cpu(i) {
2547                 struct ext4_locality_group *lg;
2548                 lg = per_cpu_ptr(sbi->s_locality_groups, i);
2549                 mutex_init(&lg->lg_mutex);
2550                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2551                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2552                 spin_lock_init(&lg->lg_prealloc_lock);
2553         }
2554
2555         ext4_mb_init_per_dev_proc(sb);
2556         ext4_mb_history_init(sb);
2557
2558         if (sbi->s_journal)
2559                 sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2560
2561         printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2562         return 0;
2563 }
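
/*
 * The do/while loop above packs the per-order buddy bitmaps back to
 * back, each half the size of the previous one.  For a 4KB block size
 * (s_blocksize_bits = 12) it yields, per order, the byte offset of
 * that order's bitmap within the buddy block and its length in bits:
 *
 *      order   offset  max
 *        0          0  32768   // the regular bitmap
 *        1          0  16384   // buddy data starts here
 *        2       2048   8192
 *        3       3072   4096
 *      ...
 *       13       4095      4
 */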
2564
2565 /* needs to be called with the ext4 group lock held (ext4_lock_group) */
2566 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2567 {
2568         struct ext4_prealloc_space *pa;
2569         struct list_head *cur, *tmp;
2570         int count = 0;
2571
2572         list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2573                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2574                 list_del(&pa->pa_group_list);
2575                 count++;
2576                 kmem_cache_free(ext4_pspace_cachep, pa);
2577         }
2578         if (count)
2579                 mb_debug("mballoc: %u PAs left\n", count);
2580
2581 }
2582
2583 int ext4_mb_release(struct super_block *sb)
2584 {
2585         ext4_group_t i;
2586         int num_meta_group_infos;
2587         struct ext4_group_info *grinfo;
2588         struct ext4_sb_info *sbi = EXT4_SB(sb);
2589
2590         if (sbi->s_group_info) {
2591                 for (i = 0; i < sbi->s_groups_count; i++) {
2592                         grinfo = ext4_get_group_info(sb, i);
2593 #ifdef DOUBLE_CHECK
2594                         kfree(grinfo->bb_bitmap);
2595 #endif
2596                         ext4_lock_group(sb, i);
2597                         ext4_mb_cleanup_pa(grinfo);
2598                         ext4_unlock_group(sb, i);
2599                         kfree(grinfo);
2600                 }
2601                 num_meta_group_infos = (sbi->s_groups_count +
2602                                 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2603                         EXT4_DESC_PER_BLOCK_BITS(sb);
2604                 for (i = 0; i < num_meta_group_infos; i++)
2605                         kfree(sbi->s_group_info[i]);
2606                 kfree(sbi->s_group_info);
2607         }
2608         kfree(sbi->s_mb_offsets);
2609         kfree(sbi->s_mb_maxs);
2610         if (sbi->s_buddy_cache)
2611                 iput(sbi->s_buddy_cache);
2612         if (sbi->s_mb_stats) {
2613                 printk(KERN_INFO
2614                        "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2615                                 atomic_read(&sbi->s_bal_allocated),
2616                                 atomic_read(&sbi->s_bal_reqs),
2617                                 atomic_read(&sbi->s_bal_success));
2618                 printk(KERN_INFO
2619                       "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2620                                 "%u 2^N hits, %u breaks, %u lost\n",
2621                                 atomic_read(&sbi->s_bal_ex_scanned),
2622                                 atomic_read(&sbi->s_bal_goals),
2623                                 atomic_read(&sbi->s_bal_2orders),
2624                                 atomic_read(&sbi->s_bal_breaks),
2625                                 atomic_read(&sbi->s_mb_lost_chunks));
2626                 printk(KERN_INFO
2627                        "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2628                                 sbi->s_mb_buddies_generated++,
2629                                 sbi->s_mb_generation_time);
2630                 printk(KERN_INFO
2631                        "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2632                                 atomic_read(&sbi->s_mb_preallocated),
2633                                 atomic_read(&sbi->s_mb_discarded));
2634         }
2635
2636         free_percpu(sbi->s_locality_groups);
2637         ext4_mb_history_release(sb);
2638         ext4_mb_destroy_per_dev_proc(sb);
2639
2640         return 0;
2641 }
2642
2643 /*
2644  * This function is called by the jbd2 layer once the commit has finished,
2645  * so we know we can free the blocks that were released with that commit.
2646  */
2647 static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2648 {
2649         struct super_block *sb = journal->j_private;
2650         struct ext4_buddy e4b;
2651         struct ext4_group_info *db;
2652         int err, count = 0, count2 = 0;
2653         struct ext4_free_data *entry;
2654         ext4_fsblk_t discard_block;
2655         struct list_head *l, *ltmp;
2656
2657         list_for_each_safe(l, ltmp, &txn->t_private_list) {
2658                 entry = list_entry(l, struct ext4_free_data, list);
2659
2660                 mb_debug("gonna free %u blocks in group %u (0x%p):",
2661                          entry->count, entry->group, entry);
2662
2663                 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2664                 /* we expect to find an existing buddy because it's pinned */
2665                 BUG_ON(err != 0);
2666
2667                 db = e4b.bd_info;
2668                 /* there are blocks to put into the buddy to make them really free */
2669                 count += entry->count;
2670                 count2++;
2671                 ext4_lock_group(sb, entry->group);
2672                 /* Take it out of per group rb tree */
2673                 rb_erase(&entry->node, &(db->bb_free_root));
2674                 mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2675
2676                 if (!db->bb_free_root.rb_node) {
2677                         /* No more items in the per group rb tree
2678                          * balance refcounts from ext4_mb_free_metadata()
2679                          */
2680                         page_cache_release(e4b.bd_buddy_page);
2681                         page_cache_release(e4b.bd_bitmap_page);
2682                 }
2683                 ext4_unlock_group(sb, entry->group);
2684                 discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
2685                         + entry->start_blk
2686                         + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
2687                 trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u", sb->s_id,
2688                            (unsigned long long) discard_block, entry->count);
2689                 sb_issue_discard(sb, discard_block, entry->count);
2690
2691                 kmem_cache_free(ext4_free_ext_cachep, entry);
2692                 ext4_mb_release_desc(&e4b);
2693         }
2694
2695         mb_debug("freed %u blocks in %u structures\n", count, count2);
2696 }
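
/*
 * discard_block above converts a (group, group-relative block) pair
 * back into an absolute filesystem block - the inverse of the usual
 * group/offset split:
 *
 *      abs = (ext4_fsblk_t)group * EXT4_BLOCKS_PER_GROUP(sb)
 *              + start_blk + le32_to_cpu(es->s_first_data_block);
 *
 * The (ext4_fsblk_t) cast matters: without it the multiplication
 * could be done in 32 bits and overflow on a sufficiently large
 * filesystem.
 */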
2697
2698 #define EXT4_MB_STATS_NAME              "stats"
2699 #define EXT4_MB_MAX_TO_SCAN_NAME        "max_to_scan"
2700 #define EXT4_MB_MIN_TO_SCAN_NAME        "min_to_scan"
2701 #define EXT4_MB_ORDER2_REQ              "order2_req"
2702 #define EXT4_MB_STREAM_REQ              "stream_req"
2703 #define EXT4_MB_GROUP_PREALLOC          "group_prealloc"
2704
2705 static int ext4_mb_init_per_dev_proc(struct super_block *sb)
2706 {
2707 #ifdef CONFIG_PROC_FS
2708         mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
2709         struct ext4_sb_info *sbi = EXT4_SB(sb);
2710         struct proc_dir_entry *proc;
2711
2712         if (sbi->s_proc == NULL)
2713                 return -EINVAL;
2714
2715         EXT4_PROC_HANDLER(EXT4_MB_STATS_NAME, mb_stats);
2716         EXT4_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, mb_max_to_scan);
2717         EXT4_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, mb_min_to_scan);
2718         EXT4_PROC_HANDLER(EXT4_MB_ORDER2_REQ, mb_order2_reqs);
2719         EXT4_PROC_HANDLER(EXT4_MB_STREAM_REQ, mb_stream_request);
2720         EXT4_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, mb_group_prealloc);
2721         return 0;
2722
2723 err_out:
2724         remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2725         remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2726         remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2727         remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2728         remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2729         remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
2730         return -ENOMEM;
2731 #else
2732         return 0;
2733 #endif
2734 }
2735
2736 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb)
2737 {
2738 #ifdef CONFIG_PROC_FS
2739         struct ext4_sb_info *sbi = EXT4_SB(sb);
2740
2741         if (sbi->s_proc == NULL)
2742                 return -EINVAL;
2743
2744         remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2745         remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2746         remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2747         remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2748         remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2749         remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
2750 #endif
2751         return 0;
2752 }
2753
2754 int __init init_ext4_mballoc(void)
2755 {
2756         ext4_pspace_cachep =
2757                 kmem_cache_create("ext4_prealloc_space",
2758                                      sizeof(struct ext4_prealloc_space),
2759                                      0, SLAB_RECLAIM_ACCOUNT, NULL);
2760         if (ext4_pspace_cachep == NULL)
2761                 return -ENOMEM;
2762
2763         ext4_ac_cachep =
2764                 kmem_cache_create("ext4_alloc_context",
2765                                      sizeof(struct ext4_allocation_context),
2766                                      0, SLAB_RECLAIM_ACCOUNT, NULL);
2767         if (ext4_ac_cachep == NULL) {
2768                 kmem_cache_destroy(ext4_pspace_cachep);
2769                 return -ENOMEM;
2770         }
2771
2772         ext4_free_ext_cachep =
2773                 kmem_cache_create("ext4_free_block_extents",
2774                                      sizeof(struct ext4_free_data),
2775                                      0, SLAB_RECLAIM_ACCOUNT, NULL);
2776         if (ext4_free_ext_cachep == NULL) {
2777                 kmem_cache_destroy(ext4_pspace_cachep);
2778                 kmem_cache_destroy(ext4_ac_cachep);
2779                 return -ENOMEM;
2780         }
2781         return 0;
2782 }
2783
2784 void exit_ext4_mballoc(void)
2785 {
2786         /* XXX: synchronize_rcu(); */
2787         kmem_cache_destroy(ext4_pspace_cachep);
2788         kmem_cache_destroy(ext4_ac_cachep);
2789         kmem_cache_destroy(ext4_free_ext_cachep);
2790 }
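
/*
 * Editor's note: a hedged sketch of how this init/exit pair is presumably
 * consumed by the filesystem registration code, which lives outside this
 * file. Illustrative only; the names and ordering here are assumptions.
 */
#if 0
static int __init init_ext4_fs(void)
{
	int err;

	err = init_ext4_mballoc();	/* create the three slab caches */
	if (err)
		return err;
	err = register_filesystem(&ext4_fs_type);
	if (err)
		exit_ext4_mballoc();	/* unwind the caches on failure */
	return err;
}

static void __exit exit_ext4_fs(void)
{
	unregister_filesystem(&ext4_fs_type);
	exit_ext4_mballoc();		/* destroy the slab caches */
}
#endif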
2791
2792
2793 /*
2794  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
2795  * Returns 0 on success or an error code
2796  */
2797 static noinline_for_stack int
2798 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2799                                 handle_t *handle, unsigned int reserv_blks)
2800 {
2801         struct buffer_head *bitmap_bh = NULL;
2802         struct ext4_super_block *es;
2803         struct ext4_group_desc *gdp;
2804         struct buffer_head *gdp_bh;
2805         struct ext4_sb_info *sbi;
2806         struct super_block *sb;
2807         ext4_fsblk_t block;
2808         int err, len;
2809
2810         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2811         BUG_ON(ac->ac_b_ex.fe_len <= 0);
2812
2813         sb = ac->ac_sb;
2814         sbi = EXT4_SB(sb);
2815         es = sbi->s_es;
2816
2817
2818         err = -EIO;
2819         bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2820         if (!bitmap_bh)
2821                 goto out_err;
2822
2823         err = ext4_journal_get_write_access(handle, bitmap_bh);
2824         if (err)
2825                 goto out_err;
2826
2827         err = -EIO;
2828         gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2829         if (!gdp)
2830                 goto out_err;
2831
2832         ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2833                         gdp->bg_free_blocks_count);
2834
2835         err = ext4_journal_get_write_access(handle, gdp_bh);
2836         if (err)
2837                 goto out_err;
2838
2839         block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
2840                 + ac->ac_b_ex.fe_start
2841                 + le32_to_cpu(es->s_first_data_block);
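	/*
	 * Editor's worked example (hypothetical numbers): with 32768 blocks
	 * per group, s_first_data_block == 0, fe_group == 5 and
	 * fe_start == 100, this yields block == 5 * 32768 + 100 == 163940.
	 */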
2842
2843         len = ac->ac_b_ex.fe_len;
2844         if (in_range(ext4_block_bitmap(sb, gdp), block, len) ||
2845             in_range(ext4_inode_bitmap(sb, gdp), block, len) ||
2846             in_range(block, ext4_inode_table(sb, gdp),
2847                      EXT4_SB(sb)->s_itb_per_group) ||
2848             in_range(block + len - 1, ext4_inode_table(sb, gdp),
2849                      EXT4_SB(sb)->s_itb_per_group)) {
2850                 ext4_error(sb, __func__,
2851                            "Allocating block in system zone - block = %llu",
2852                            block);
2853                 /* The file system is mounted not to panic on error,
2854                  * so fix the bitmap and repeat the block allocation.
2855                  * We leak some of the blocks here.
2856                  */
2857                 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
2858                                 bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2859                                 ac->ac_b_ex.fe_len);
2860                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2861                 if (!err)
2862                         err = -EAGAIN;
2863                 goto out_err;
2864         }
2865 #ifdef AGGRESSIVE_CHECK
2866         {
2867                 int i;
2868                 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2869                         BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2870                                                 bitmap_bh->b_data));
2871                 }
2872         }
2873 #endif
2874         mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
2875                                 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
2876
2877         spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2878         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2879                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2880                 gdp->bg_free_blocks_count =
2881                         cpu_to_le16(ext4_free_blocks_after_init(sb,
2882                                                 ac->ac_b_ex.fe_group,
2883                                                 gdp));
2884         }
2885         le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
2886         gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2887         spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2888         percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
2889         /*
2890          * Now reduce the dirty block count as well; it should not go negative
2891          */
2892         if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2893                 /* release all the reserved blocks if non delalloc */
2894                 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
2895         else
2896                 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
2897                                                 ac->ac_b_ex.fe_len);
2898
2899         if (sbi->s_log_groups_per_flex) {
2900                 ext4_group_t flex_group = ext4_flex_group(sbi,
2901                                                           ac->ac_b_ex.fe_group);
2902                 spin_lock(sb_bgl_lock(sbi, flex_group));
2903                 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
2904                 spin_unlock(sb_bgl_lock(sbi, flex_group));
2905         }
2906
2907         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2908         if (err)
2909                 goto out_err;
2910         err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2911
2912 out_err:
2913         sb->s_dirt = 1;
2914         brelse(bitmap_bh);
2915         return err;
2916 }
2917
2918 /*
2919  * here we normalize the request for a locality group
2920  * Group requests are normalized to the s_stripe size if it was set via the
2921  * mount option; otherwise we use s_mb_group_prealloc, which can be configured via
2922  * /proc/fs/ext4/<partition>/group_prealloc
2923  *
2924  * XXX: should we try to preallocate more than the group has now?
2925  */
2926 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2927 {
2928         struct super_block *sb = ac->ac_sb;
2929         struct ext4_locality_group *lg = ac->ac_lg;
2930
2931         BUG_ON(lg == NULL);
2932         if (EXT4_SB(sb)->s_stripe)
2933                 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
2934         else
2935                 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2936         mb_debug("#%u: goal %u blocks for locality group\n",
2937                 current->pid, ac->ac_g_ex.fe_len);
2938 }
2939
2940 /*
2941  * Normalization means making the request better in terms of
2942  * size and alignment
2943  */
2944 static noinline_for_stack void
2945 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2946                                 struct ext4_allocation_request *ar)
2947 {
2948         int bsbits, max;
2949         ext4_lblk_t end;
2950         loff_t size, orig_size, start_off;
2951         ext4_lblk_t start, orig_start;
2952         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2953         struct ext4_prealloc_space *pa;
2954
2955         /* only normalize data requests; metadata requests
2956            do not need preallocation */
2957         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2958                 return;
2959
2960         /* sometimes the caller may want exact blocks */
2961         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2962                 return;
2963
2964         /* caller may indicate that preallocation isn't
2965          * required (it's a tail, for example) */
2966         if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2967                 return;
2968
2969         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2970                 ext4_mb_normalize_group_request(ac);
2971                 return;
2972         }
2973
2974         bsbits = ac->ac_sb->s_blocksize_bits;
2975
2976         /* first, determine the actual file size
2977          * assuming the current request is allocated */
2978         size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2979         size = size << bsbits;
2980         if (size < i_size_read(ac->ac_inode))
2981                 size = i_size_read(ac->ac_inode);
2982
2983         /* max size of free chunks */
2984         max = 2 << bsbits;
2985
2986 #define NRL_CHECK_SIZE(req, size, max, chunk_size)      \
2987                 (req <= (size) || max <= (chunk_size))
2988
2989         /* first, try to predict filesize */
2990         /* XXX: should this table be tunable? */
2991         start_off = 0;
2992         if (size <= 16 * 1024) {
2993                 size = 16 * 1024;
2994         } else if (size <= 32 * 1024) {
2995                 size = 32 * 1024;
2996         } else if (size <= 64 * 1024) {
2997                 size = 64 * 1024;
2998         } else if (size <= 128 * 1024) {
2999                 size = 128 * 1024;
3000         } else if (size <= 256 * 1024) {
3001                 size = 256 * 1024;
3002         } else if (size <= 512 * 1024) {
3003                 size = 512 * 1024;
3004         } else if (size <= 1024 * 1024) {
3005                 size = 1024 * 1024;
3006         } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3007                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3008                                                 (21 - bsbits)) << 21;
3009                 size = 2 * 1024 * 1024;
3010         } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3011                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3012                                                         (22 - bsbits)) << 22;
3013                 size = 4 * 1024 * 1024;
3014         } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3015                                         (8<<20)>>bsbits, max, 8 * 1024)) {
3016                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3017                                                         (23 - bsbits)) << 23;
3018                 size = 8 * 1024 * 1024;
3019         } else {
3020                 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3021                 size      = ac->ac_o_ex.fe_len << bsbits;
3022         }
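	/*
	 * Editor's worked example (hypothetical numbers): a predicted size
	 * of 300KB falls into the 512KB bucket above, so the request is
	 * normalized to 512KB with start_off left at 0; only the
	 * multi-megabyte buckets also realign start_off to a 2MB, 4MB or
	 * 8MB boundary derived from fe_logical.
	 */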
3023         orig_size = size = size >> bsbits;
3024         orig_start = start = start_off >> bsbits;
3025
3026         /* don't cover already allocated blocks in selected range */
3027         if (ar->pleft && start <= ar->lleft) {
3028                 size -= ar->lleft + 1 - start;
3029                 start = ar->lleft + 1;
3030         }
3031         if (ar->pright && start + size - 1 >= ar->lright)
3032                 size -= start + size - ar->lright;
3033
3034         end = start + size;
3035
3036         /* check we don't cross already preallocated blocks */
3037         rcu_read_lock();
3038         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3039                 ext4_lblk_t pa_end;
3040
3041                 if (pa->pa_deleted)
3042                         continue;
3043                 spin_lock(&pa->pa_lock);
3044                 if (pa->pa_deleted) {
3045                         spin_unlock(&pa->pa_lock);
3046                         continue;
3047                 }
3048
3049                 pa_end = pa->pa_lstart + pa->pa_len;
3050
3051                 /* PA must not overlap original request */
3052                 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3053                         ac->ac_o_ex.fe_logical < pa->pa_lstart));
3054
3055                 /* skip PAs that the normalized request doesn't overlap */
3056                 if (pa->pa_lstart >= end) {
3057                         spin_unlock(&pa->pa_lock);
3058                         continue;
3059                 }
3060                 if (pa_end <= start) {
3061                         spin_unlock(&pa->pa_lock);
3062                         continue;
3063                 }
3064                 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3065
3066                 if (pa_end <= ac->ac_o_ex.fe_logical) {
3067                         BUG_ON(pa_end < start);
3068                         start = pa_end;
3069                 }
3070
3071                 if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3072                         BUG_ON(pa->pa_lstart > end);
3073                         end = pa->pa_lstart;
3074                 }
3075                 spin_unlock(&pa->pa_lock);
3076         }
3077         rcu_read_unlock();
3078         size = end - start;
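	/*
	 * Editor's worked example (hypothetical numbers): with a normalized
	 * window of [0, 128) and an existing PA over logical blocks
	 * [96, 112) lying to the right of the original block, the loop
	 * above trims end down to 96, so size becomes 96.
	 */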
3079
3080         /* XXX: extra loop to check we really don't overlap preallocations */
3081         rcu_read_lock();
3082         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3083                 ext4_lblk_t pa_end;
3084                 spin_lock(&pa->pa_lock);
3085                 if (pa->pa_deleted == 0) {
3086                         pa_end = pa->pa_lstart + pa->pa_len;
3087                         BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3088                 }
3089                 spin_unlock(&pa->pa_lock);
3090         }
3091         rcu_read_unlock();
3092
3093         if (start + size <= ac->ac_o_ex.fe_logical ||
3094                         start > ac->ac_o_ex.fe_logical) {
3095                 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3096                         (unsigned long) start, (unsigned long) size,
3097                         (unsigned long) ac->ac_o_ex.fe_logical);
3098         }
3099         BUG_ON(start + size <= ac->ac_o_ex.fe_logical ||
3100                         start > ac->ac_o_ex.fe_logical);
3101         BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3102
3103         /* now prepare goal request */
3104
3105         /* XXX: is it better to align blocks with respect to logical
3106          * placement, or to satisfy a big request as is? */
3107         ac->ac_g_ex.fe_logical = start;
3108         ac->ac_g_ex.fe_len = size;
3109
3110         /* define goal start in order to merge */
3111         if (ar->pright && (ar->lright == (start + size))) {
3112                 /* merge to the right */
3113                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3114                                                 &ac->ac_f_ex.fe_group,
3115                                                 &ac->ac_f_ex.fe_start);
3116                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3117         }
3118         if (ar->pleft && (ar->lleft + 1 == start)) {
3119                 /* merge to the left */
3120                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3121                                                 &ac->ac_f_ex.fe_group,
3122                                                 &ac->ac_f_ex.fe_start);
3123                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3124         }
3125
3126         mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3127                 (unsigned) orig_size, (unsigned) start);
3128 }
3129
3130 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3131 {
3132         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3133
3134         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3135                 atomic_inc(&sbi->s_bal_reqs);
3136                 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3137                 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3138                         atomic_inc(&sbi->s_bal_success);
3139                 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3140                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3141                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3142                         atomic_inc(&sbi->s_bal_goals);
3143                 if (ac->ac_found > sbi->s_mb_max_to_scan)
3144                         atomic_inc(&sbi->s_bal_breaks);
3145         }
3146
3147         ext4_mb_store_history(ac);
3148 }
3149
3150 /*
3151  * use blocks preallocated to inode
3152  */
3153 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3154                                 struct ext4_prealloc_space *pa)
3155 {
3156         ext4_fsblk_t start;
3157         ext4_fsblk_t end;
3158         int len;
3159
3160         /* found preallocated blocks, use them */
3161         start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3162         end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3163         len = end - start;
3164         ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3165                                         &ac->ac_b_ex.fe_start);
3166         ac->ac_b_ex.fe_len = len;
3167         ac->ac_status = AC_STATUS_FOUND;
3168         ac->ac_pa = pa;
3169
3170         BUG_ON(start < pa->pa_pstart);
3171         BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3172         BUG_ON(pa->pa_free < len);
3173         pa->pa_free -= len;
3174
3175         mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
3176 }
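
/*
 * Editor's worked example (hypothetical numbers) for the mapping above:
 * with pa_lstart == 100, pa_pstart == 5000, pa_len == 16 and an original
 * request of 8 blocks at logical block 104, start == 5000 + (104 - 100) ==
 * 5004 and end == min(5016, 5004 + 8) == 5012, so 8 physical blocks are
 * taken from the PA and pa_free drops by 8.
 */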
3177
3178 /*
3179  * use blocks preallocated to locality group
3180  */
3181 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3182                                 struct ext4_prealloc_space *pa)
3183 {
3184         unsigned int len = ac->ac_o_ex.fe_len;
3185
3186         ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3187                                         &ac->ac_b_ex.fe_group,
3188                                         &ac->ac_b_ex.fe_start);
3189         ac->ac_b_ex.fe_len = len;
3190         ac->ac_status = AC_STATUS_FOUND;
3191         ac->ac_pa = pa;
3192
3193         /* we don't correct pa_pstart or pa_len here to avoid a
3194          * possible race when the group is being loaded concurrently;
3195          * instead we correct the pa later, after blocks are marked
3196          * in the on-disk bitmap -- see ext4_mb_release_context().
3197          * Other CPUs are prevented from allocating from this pa by lg_mutex
3198          */
3199         mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3200 }
3201
3202 /*
3203  * Return the prealloc space that has the minimal distance
3204  * from the goal block. @cpa is the prealloc
3205  * space with the currently known minimal distance
3206  * from the goal block.
3207  */
3208 static struct ext4_prealloc_space *
3209 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3210                         struct ext4_prealloc_space *pa,
3211                         struct ext4_prealloc_space *cpa)
3212 {
3213         ext4_fsblk_t cur_distance, new_distance;
3214
3215         if (cpa == NULL) {
3216                 atomic_inc(&pa->pa_count);
3217                 return pa;
3218         }
3219         cur_distance = abs(goal_block - cpa->pa_pstart);
3220         new_distance = abs(goal_block - pa->pa_pstart);
3221
3222         if (cur_distance < new_distance)
3223                 return cpa;
3224
3225         /* drop the previous reference */
3226         atomic_dec(&cpa->pa_count);
3227         atomic_inc(&pa->pa_count);
3228         return pa;
3229 }
3230
3231 /*
3232  * search goal blocks in preallocated space
3233  */
3234 static noinline_for_stack int
3235 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3236 {
3237         int order, i;
3238         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3239         struct ext4_locality_group *lg;
3240         struct ext4_prealloc_space *pa, *cpa = NULL;
3241         ext4_fsblk_t goal_block;
3242
3243         /* only data can be preallocated */
3244         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3245                 return 0;
3246
3247         /* first, try per-file preallocation */
3248         rcu_read_lock();
3249         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3250
3251                 /* all fields in this condition don't change,
3252                  * so we can skip locking for them */
3253                 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3254                         ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3255                         continue;
3256
3257                 /* found preallocated blocks, use them */
3258                 spin_lock(&pa->pa_lock);
3259                 if (pa->pa_deleted == 0 && pa->pa_free) {
3260                         atomic_inc(&pa->pa_count);
3261                         ext4_mb_use_inode_pa(ac, pa);
3262                         spin_unlock(&pa->pa_lock);
3263                         ac->ac_criteria = 10;
3264                         rcu_read_unlock();
3265                         return 1;
3266                 }
3267                 spin_unlock(&pa->pa_lock);
3268         }
3269         rcu_read_unlock();
3270
3271         /* can we use group allocation? */
3272         if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3273                 return 0;
3274
3275         /* inode may have no locality group for some reason */
3276         lg = ac->ac_lg;
3277         if (lg == NULL)
3278                 return 0;
3279         order  = fls(ac->ac_o_ex.fe_len) - 1;
3280         if (order > PREALLOC_TB_SIZE - 1)
3281                 /* The max size of hash table is PREALLOC_TB_SIZE */
3282                 order = PREALLOC_TB_SIZE - 1;
3283
3284         goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
3285                      ac->ac_g_ex.fe_start +
3286                      le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
3287         /*
3288          * search for the prealloc space that has the
3289          * minimal distance from the goal block.
3290          */
3291         for (i = order; i < PREALLOC_TB_SIZE; i++) {
3292                 rcu_read_lock();
3293                 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3294                                         pa_inode_list) {
3295                         spin_lock(&pa->pa_lock);
3296                         if (pa->pa_deleted == 0 &&
3297                                         pa->pa_free >= ac->ac_o_ex.fe_len) {
3298
3299                                 cpa = ext4_mb_check_group_pa(goal_block,
3300                                                                 pa, cpa);
3301                         }
3302                         spin_unlock(&pa->pa_lock);
3303                 }
3304                 rcu_read_unlock();
3305         }
3306         if (cpa) {
3307                 ext4_mb_use_group_pa(ac, cpa);
3308                 ac->ac_criteria = 20;
3309                 return 1;
3310         }
3311         return 0;
3312 }
3313
3314 /*
3315  * the function goes through all preallocations in this group and marks them
3316  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3317  * Needs to be called with the ext4 group lock held (ext4_lock_group)
3318  */
3319 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3320                                         ext4_group_t group)
3321 {
3322         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3323         struct ext4_prealloc_space *pa;
3324         struct list_head *cur;
3325         ext4_group_t groupnr;
3326         ext4_grpblk_t start;
3327         int preallocated = 0;
3328         int count = 0;
3329         int len;
3330
3331         /* all paths that discard preallocations first load the group,
3332          * so the only competing code is preallocation use.
3333          * we don't need any locking here.
3334          * notice that we do NOT ignore preallocations with pa_deleted set;
3335          * otherwise we could leave used blocks available for
3336          * allocation in the buddy when a concurrent ext4_mb_put_pa()
3337          * is dropping the preallocation
3338          */
3339         list_for_each(cur, &grp->bb_prealloc_list) {
3340                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3341                 spin_lock(&pa->pa_lock);
3342                 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3343                                              &groupnr, &start);
3344                 len = pa->pa_len;
3345                 spin_unlock(&pa->pa_lock);
3346                 if (unlikely(len == 0))
3347                         continue;
3348                 BUG_ON(groupnr != group);
3349                 mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3350                                                 bitmap, start, len);
3351                 preallocated += len;
3352                 count++;
3353         }
3354         mb_debug("preallocated %u for group %u\n", preallocated, group);
3355 }
3356
3357 static void ext4_mb_pa_callback(struct rcu_head *head)
3358 {
3359         struct ext4_prealloc_space *pa;
3360         pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3361         kmem_cache_free(ext4_pspace_cachep, pa);
3362 }
3363
3364 /*
3365  * drops a reference to preallocated space descriptor
3366  * if this was the last reference and the space is consumed
3367  */
3368 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3369                         struct super_block *sb, struct ext4_prealloc_space *pa)
3370 {
3371         ext4_group_t grp;
3372
3373         if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3374                 return;
3375
3376         /* in this short window concurrent discard can set pa_deleted */
3377         spin_lock(&pa->pa_lock);
3378         if (pa->pa_deleted == 1) {
3379                 spin_unlock(&pa->pa_lock);
3380                 return;
3381         }
3382
3383         pa->pa_deleted = 1;
3384         spin_unlock(&pa->pa_lock);
3385
3386         /* -1 is to protect from crossing allocation group */
3387         ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
3388
3389         /*
3390          * possible race:
3391          *
3392          *  P1 (buddy init)                     P2 (regular allocation)
3393          *                                      find block B in PA
3394          *  copy on-disk bitmap to buddy
3395          *                                      mark B in on-disk bitmap
3396          *                                      drop PA from group
3397          *  mark all PAs in buddy
3398          *
3399          * thus, P1 initializes buddy with B available. to prevent this
3400          * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3401          * against that pair
3402          */
3403         ext4_lock_group(sb, grp);
3404         list_del(&pa->pa_group_list);
3405         ext4_unlock_group(sb, grp);
3406
3407         spin_lock(pa->pa_obj_lock);
3408         list_del_rcu(&pa->pa_inode_list);
3409         spin_unlock(pa->pa_obj_lock);
3410
3411         call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3412 }
3413
3414 /*
3415  * creates new preallocated space for given inode
3416  */
3417 static noinline_for_stack int
3418 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3419 {
3420         struct super_block *sb = ac->ac_sb;
3421         struct ext4_prealloc_space *pa;
3422         struct ext4_group_info *grp;
3423         struct ext4_inode_info *ei;
3424
3425         /* preallocate only when the found space is larger than requested */
3426         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3427         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3428         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3429
3430         pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3431         if (pa == NULL)
3432                 return -ENOMEM;
3433
3434         if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3435                 int winl;
3436                 int wins;
3437                 int win;
3438                 int offs;
3439
3440                 /* we can't allocate as much as the normalizer wants,
3441                  * so the found space must get a proper lstart
3442                  * to cover the original request */
3443                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3444                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3445
3446                 /* we're limited by the original request in that
3447                  * the logical block must be covered in any case;
3448                  * winl is the window we can move our chunk within */
3449                 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3450
3451                 /* also, we should cover the whole original request */
3452                 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3453
3454                 /* the smallest one defines real window */
3455                 win = min(winl, wins);
3456
3457                 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3458                 if (offs && offs < win)
3459                         win = offs;
3460
3461                 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3462                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3463                 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3464         }
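	/*
	 * Editor's worked example (hypothetical numbers): with goal
	 * fe_logical == 64, original fe_logical == 100, original fe_len == 8
	 * and a best extent of 32 blocks: winl == 36, wins == 24 and
	 * offs == 100 % 32 == 4, so win becomes 4 and the PA's logical
	 * start is placed at 96, which still covers blocks [100, 108).
	 */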
3465
3466         /* preallocation can change ac_b_ex, thus we store actually
3467          * allocated blocks for history */
3468         ac->ac_f_ex = ac->ac_b_ex;
3469
3470         pa->pa_lstart = ac->ac_b_ex.fe_logical;
3471         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3472         pa->pa_len = ac->ac_b_ex.fe_len;
3473         pa->pa_free = pa->pa_len;
3474         atomic_set(&pa->pa_count, 1);
3475         spin_lock_init(&pa->pa_lock);
3476         pa->pa_deleted = 0;
3477         pa->pa_linear = 0;
3478
3479         mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3480                         pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3481
3482         ext4_mb_use_inode_pa(ac, pa);
3483         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3484
3485         ei = EXT4_I(ac->ac_inode);
3486         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3487
3488         pa->pa_obj_lock = &ei->i_prealloc_lock;
3489         pa->pa_inode = ac->ac_inode;
3490
3491         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3492         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3493         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3494
3495         spin_lock(pa->pa_obj_lock);
3496         list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3497         spin_unlock(pa->pa_obj_lock);
3498
3499         return 0;
3500 }
3501
3502 /*
3503  * creates new preallocated space for the locality group the inode belongs to
3504  */
3505 static noinline_for_stack int
3506 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3507 {
3508         struct super_block *sb = ac->ac_sb;
3509         struct ext4_locality_group *lg;
3510         struct ext4_prealloc_space *pa;
3511         struct ext4_group_info *grp;
3512
3513         /* preallocate only when the found space is larger than requested */
3514         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3515         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3516         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3517
3518         BUG_ON(ext4_pspace_cachep == NULL);
3519         pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3520         if (pa == NULL)
3521                 return -ENOMEM;
3522
3523         /* preallocation can change ac_b_ex, thus we store actually
3524          * allocated blocks for history */
3525         ac->ac_f_ex = ac->ac_b_ex;
3526
3527         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3528         pa->pa_lstart = pa->pa_pstart;
3529         pa->pa_len = ac->ac_b_ex.fe_len;
3530         pa->pa_free = pa->pa_len;
3531         atomic_set(&pa->pa_count, 1);
3532         spin_lock_init(&pa->pa_lock);
3533         INIT_LIST_HEAD(&pa->pa_inode_list);
3534         pa->pa_deleted = 0;
3535         pa->pa_linear = 1;
3536
3537         mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3538                         pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3539
3540         ext4_mb_use_group_pa(ac, pa);
3541         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3542
3543         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3544         lg = ac->ac_lg;
3545         BUG_ON(lg == NULL);
3546
3547         pa->pa_obj_lock = &lg->lg_prealloc_lock;
3548         pa->pa_inode = NULL;
3549
3550         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3551         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3552         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3553
3554         /*
3555          * We will later add the new pa to the right bucket
3556          * after updating the pa_free in ext4_mb_release_context
3557          */
3558         return 0;
3559 }
3560
3561 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3562 {
3563         int err;
3564
3565         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3566                 err = ext4_mb_new_group_pa(ac);
3567         else
3568                 err = ext4_mb_new_inode_pa(ac);
3569         return err;
3570 }
3571
3572 /*
3573  * finds all unused blocks in the on-disk bitmap and frees them in the
3574  * in-core bitmap and buddy.
3575  * @pa must be unlinked from inode and group lists, so that
3576  * nobody else can find/use it.
3577  * the caller MUST hold group/inode locks.
3578  * TODO: optimize the case when there are no in-core structures yet
3579  */
3580 static noinline_for_stack int
3581 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3582                         struct ext4_prealloc_space *pa,
3583                         struct ext4_allocation_context *ac)
3584 {
3585         struct super_block *sb = e4b->bd_sb;
3586         struct ext4_sb_info *sbi = EXT4_SB(sb);
3587         unsigned int end;
3588         unsigned int next;
3589         ext4_group_t group;
3590         ext4_grpblk_t bit;
3591         sector_t start;
3592         int err = 0;
3593         int free = 0;
3594
3595         BUG_ON(pa->pa_deleted == 0);
3596         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3597         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3598         end = bit + pa->pa_len;
3599
3600         if (ac) {
3601                 ac->ac_sb = sb;
3602                 ac->ac_inode = pa->pa_inode;
3603                 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3604         }
3605
3606         while (bit < end) {
3607                 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3608                 if (bit >= end)
3609                         break;
3610                 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3611                 start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3612                                 le32_to_cpu(sbi->s_es->s_first_data_block);
3613                 mb_debug("    free preallocated %u/%u in group %u\n",
3614                                 (unsigned) start, (unsigned) next - bit,
3615                                 (unsigned) group);
3616                 free += next - bit;
3617
3618                 if (ac) {
3619                         ac->ac_b_ex.fe_group = group;
3620                         ac->ac_b_ex.fe_start = bit;
3621                         ac->ac_b_ex.fe_len = next - bit;
3622                         ac->ac_b_ex.fe_logical = 0;
3623                         ext4_mb_store_history(ac);
3624                 }
3625
3626                 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3627                 bit = next + 1;
3628         }
3629         if (free != pa->pa_free) {
3630                 printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
3631                         pa, (unsigned long) pa->pa_lstart,
3632                         (unsigned long) pa->pa_pstart,
3633                         (unsigned long) pa->pa_len);
3634                 ext4_error(sb, __func__, "free %u, pa_free %u",
3635                                                 free, pa->pa_free);
3636                 /*
3637                  * pa is already deleted so we use the value obtained
3638                  * from the bitmap and continue.
3639                  */
3640         }
3641         atomic_add(free, &sbi->s_mb_discarded);
3642
3643         return err;
3644 }
3645
3646 static noinline_for_stack int
3647 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3648                                 struct ext4_prealloc_space *pa,
3649                                 struct ext4_allocation_context *ac)
3650 {
3651         struct super_block *sb = e4b->bd_sb;
3652         ext4_group_t group;
3653         ext4_grpblk_t bit;
3654
3655         if (ac)
3656                 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3657
3658         BUG_ON(pa->pa_deleted == 0);
3659         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3660         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3661         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3662         atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3663
3664         if (ac) {
3665                 ac->ac_sb = sb;
3666                 ac->ac_inode = NULL;
3667                 ac->ac_b_ex.fe_group = group;
3668                 ac->ac_b_ex.fe_start = bit;
3669                 ac->ac_b_ex.fe_len = pa->pa_len;
3670                 ac->ac_b_ex.fe_logical = 0;
3671                 ext4_mb_store_history(ac);
3672         }
3673
3674         return 0;
3675 }
3676
3677 /*
3678  * releases all preallocations in the given group
3679  *
3680  * first, we need to decide discard policy:
3681  * - when do we discard
3682  *   1) ENOSPC
3683  * - how many do we discard
3684  *   1) as many as requested
3685  */
3686 static noinline_for_stack int
3687 ext4_mb_discard_group_preallocations(struct super_block *sb,
3688                                         ext4_group_t group, int needed)
3689 {
3690         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3691         struct buffer_head *bitmap_bh = NULL;
3692         struct ext4_prealloc_space *pa, *tmp;
3693         struct ext4_allocation_context *ac;
3694         struct list_head list;
3695         struct ext4_buddy e4b;
3696         int err;
3697         int busy = 0;
3698         int free = 0;
3699
3700         mb_debug("discard preallocation for group %u\n", group);
3701
3702         if (list_empty(&grp->bb_prealloc_list))
3703                 return 0;
3704
3705         bitmap_bh = ext4_read_block_bitmap(sb, group);
3706         if (bitmap_bh == NULL) {
3707                 ext4_error(sb, __func__, "Error in reading block "
3708                                 "bitmap for %u", group);
3709                 return 0;
3710         }
3711
3712         err = ext4_mb_load_buddy(sb, group, &e4b);
3713         if (err) {
3714                 ext4_error(sb, __func__, "Error in loading buddy "
3715                                 "information for %u", group);
3716                 put_bh(bitmap_bh);
3717                 return 0;
3718         }
3719
3720         if (needed == 0)
3721                 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3722
3723         INIT_LIST_HEAD(&list);
3724         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3725 repeat:
3726         ext4_lock_group(sb, group);
3727         list_for_each_entry_safe(pa, tmp,
3728                                 &grp->bb_prealloc_list, pa_group_list) {
3729                 spin_lock(&pa->pa_lock);
3730                 if (atomic_read(&pa->pa_count)) {
3731                         spin_unlock(&pa->pa_lock);
3732                         busy = 1;
3733                         continue;
3734                 }
3735                 if (pa->pa_deleted) {
3736                         spin_unlock(&pa->pa_lock);
3737                         continue;
3738                 }
3739
3740                 /* seems this one can be freed ... */
3741                 pa->pa_deleted = 1;
3742
3743                 /* we can trust pa_free ... */
3744                 free += pa->pa_free;
3745
3746                 spin_unlock(&pa->pa_lock);
3747
3748                 list_del(&pa->pa_group_list);
3749                 list_add(&pa->u.pa_tmp_list, &list);
3750         }
3751
3752         /* if we still need more blocks and some PAs were used, try again */
3753         if (free < needed && busy) {
3754                 busy = 0;
3755                 ext4_unlock_group(sb, group);
3756                 /*
3757                  * Yield the CPU here so that we don't get soft lockup
3758                  * in non preempt case.
3759                  */
3760                 yield();
3761                 goto repeat;
3762         }
3763
3764         /* found anything to free? */
3765         if (list_empty(&list)) {
3766                 BUG_ON(free != 0);
3767                 goto out;
3768         }
3769
3770         /* now free all selected PAs */
3771         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3772
3773                 /* remove from object (inode or locality group) */
3774                 spin_lock(pa->pa_obj_lock);
3775                 list_del_rcu(&pa->pa_inode_list);
3776                 spin_unlock(pa->pa_obj_lock);
3777
3778                 if (pa->pa_linear)
3779                         ext4_mb_release_group_pa(&e4b, pa, ac);
3780                 else
3781                         ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3782
3783                 list_del(&pa->u.pa_tmp_list);
3784                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3785         }
3786
3787 out:
3788         ext4_unlock_group(sb, group);
3789         if (ac)
3790                 kmem_cache_free(ext4_ac_cachep, ac);
3791         ext4_mb_release_desc(&e4b);
3792         put_bh(bitmap_bh);
3793         return free;
3794 }
3795
3796 /*
3797  * releases all non-used preallocated blocks for given inode
3798  *
3799  * It's important to discard preallocations under i_data_sem
3800  * We don't want another block to be served from the prealloc
3801  * space when we are discarding the inode prealloc space.
3802  *
3803  * FIXME!! Make sure it is valid at all the call sites
3804  */
3805 void ext4_discard_preallocations(struct inode *inode)
3806 {
3807         struct ext4_inode_info *ei = EXT4_I(inode);
3808         struct super_block *sb = inode->i_sb;
3809         struct buffer_head *bitmap_bh = NULL;
3810         struct ext4_prealloc_space *pa, *tmp;
3811         struct ext4_allocation_context *ac;
3812         ext4_group_t group = 0;
3813         struct list_head list;
3814         struct ext4_buddy e4b;
3815         int err;
3816
3817         if (!S_ISREG(inode->i_mode)) {
3818                 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3819                 return;
3820         }
3821
3822         mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
3823
3824         INIT_LIST_HEAD(&list);
3825
3826         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3827 repeat:
3828         /* first, collect all pa's in the inode */
3829         spin_lock(&ei->i_prealloc_lock);
3830         while (!list_empty(&ei->i_prealloc_list)) {
3831                 pa = list_entry(ei->i_prealloc_list.next,
3832                                 struct ext4_prealloc_space, pa_inode_list);
3833                 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3834                 spin_lock(&pa->pa_lock);
3835                 if (atomic_read(&pa->pa_count)) {
3836                         /* this shouldn't happen often - nobody should
3837                          * use preallocation while we're discarding it */
3838                         spin_unlock(&pa->pa_lock);
3839                         spin_unlock(&ei->i_prealloc_lock);
3840                         printk(KERN_ERR "uh-oh! used pa while discarding\n");
3841                         WARN_ON(1);
3842                         schedule_timeout_uninterruptible(HZ);
3843                         goto repeat;
3844
3845                 }
3846                 if (pa->pa_deleted == 0) {
3847                         pa->pa_deleted = 1;
3848                         spin_unlock(&pa->pa_lock);
3849                         list_del_rcu(&pa->pa_inode_list);
3850                         list_add(&pa->u.pa_tmp_list, &list);
3851                         continue;
3852                 }
3853
3854                 /* someone is deleting pa right now */
3855                 spin_unlock(&pa->pa_lock);
3856                 spin_unlock(&ei->i_prealloc_lock);
3857
3858                 /* we have to wait here because pa_deleted
3859                  * doesn't mean the pa is already unlinked from
3860                  * the list. as we might be called from
3861                  * ->clear_inode(), the inode will get freed
3862                  * and a concurrent thread unlinking the
3863                  * pa from the inode's list may access already
3864                  * freed memory, bad-bad-bad */
3865
3866                 /* XXX: if this happens too often, we can
3867                  * add a flag to force wait only in case
3868                  * of ->clear_inode(), but not in case of
3869                  * regular truncate */
3870                 schedule_timeout_uninterruptible(HZ);
3871                 goto repeat;
3872         }
3873         spin_unlock(&ei->i_prealloc_lock);
3874
3875         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3876                 BUG_ON(pa->pa_linear != 0);
3877                 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3878
3879                 err = ext4_mb_load_buddy(sb, group, &e4b);
3880                 if (err) {
3881                         ext4_error(sb, __func__, "Error in loading buddy "
3882                                         "information for %u", group);
3883                         continue;
3884                 }
3885
3886                 bitmap_bh = ext4_read_block_bitmap(sb, group);
3887                 if (bitmap_bh == NULL) {
3888                         ext4_error(sb, __func__, "Error in reading block "
3889                                         "bitmap for %u", group);
3890                         ext4_mb_release_desc(&e4b);
3891                         continue;
3892                 }
3893
3894                 ext4_lock_group(sb, group);
3895                 list_del(&pa->pa_group_list);
3896                 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3897                 ext4_unlock_group(sb, group);
3898
3899                 ext4_mb_release_desc(&e4b);
3900                 put_bh(bitmap_bh);
3901
3902                 list_del(&pa->u.pa_tmp_list);
3903                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3904         }
3905         if (ac)
3906                 kmem_cache_free(ext4_ac_cachep, ac);
3907 }
3908
3909 /*
3910  * finds all preallocated spaces and returns the blocks being freed to them;
3911  * if a preallocated space becomes full (no block is used from the space)
3912  * then the function frees the space in the buddy
3913  * XXX: at the moment, truncate (which is the only way to free blocks)
3914  * discards all preallocations
3915  */
3916 static void ext4_mb_return_to_preallocation(struct inode *inode,
3917                                         struct ext4_buddy *e4b,
3918                                         sector_t block, int count)
3919 {
3920         BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
3921 }
3922 #ifdef MB_DEBUG
3923 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3924 {
3925         struct super_block *sb = ac->ac_sb;
3926         ext4_group_t i;
3927
3928         printk(KERN_ERR "EXT4-fs: Can't allocate:"
3929                         " Allocation context details:\n");
3930         printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
3931                         ac->ac_status, ac->ac_flags);
3932         printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
3933                         "best %lu/%lu/%lu@%lu cr %d\n",
3934                         (unsigned long)ac->ac_o_ex.fe_group,
3935                         (unsigned long)ac->ac_o_ex.fe_start,
3936                         (unsigned long)ac->ac_o_ex.fe_len,
3937                         (unsigned long)ac->ac_o_ex.fe_logical,
3938                         (unsigned long)ac->ac_g_ex.fe_group,
3939                         (unsigned long)ac->ac_g_ex.fe_start,
3940                         (unsigned long)ac->ac_g_ex.fe_len,
3941                         (unsigned long)ac->ac_g_ex.fe_logical,
3942                         (unsigned long)ac->ac_b_ex.fe_group,
3943                         (unsigned long)ac->ac_b_ex.fe_start,
3944                         (unsigned long)ac->ac_b_ex.fe_len,
3945                         (unsigned long)ac->ac_b_ex.fe_logical,
3946                         (int)ac->ac_criteria);
3947         printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
3948                 ac->ac_found);
3949         printk(KERN_ERR "EXT4-fs: groups: \n");
3950         for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
3951                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3952                 struct ext4_prealloc_space *pa;
3953                 ext4_grpblk_t start;
3954                 struct list_head *cur;
3955                 ext4_lock_group(sb, i);
3956                 list_for_each(cur, &grp->bb_prealloc_list) {
3957                         pa = list_entry(cur, struct ext4_prealloc_space,
3958                                         pa_group_list);
3959                         spin_lock(&pa->pa_lock);
3960                         ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3961                                                      NULL, &start);
3962                         spin_unlock(&pa->pa_lock);
3963                         printk(KERN_ERR "PA:%lu:%d:%u \n", i,
3964                                                         start, pa->pa_len);
3965                 }
3966                 ext4_unlock_group(sb, i);
3967
3968                 if (grp->bb_free == 0)
3969                         continue;
3970                 printk(KERN_ERR "%lu: %d/%d \n",
3971                        i, grp->bb_free, grp->bb_fragments);
3972         }
3973         printk(KERN_ERR "\n");
3974 }
3975 #else
3976 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3977 {
3978         return;
3979 }
3980 #endif
3981
3982 /*
3983  * We use locality group preallocation for small files. The size of the
3984  * file is determined by the current size or the resulting size after
3985  * allocation, whichever is larger
3986  *
3987  * One can tune this size via /proc/fs/ext4/<partition>/stream_req
3988  */
3989 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3990 {
3991         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3992         int bsbits = ac->ac_sb->s_blocksize_bits;
3993         loff_t size, isize;
3994
3995         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3996                 return;
3997
3998         size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3999         isize = i_size_read(ac->ac_inode) >> bsbits;
4000         size = max(size, isize);
4001
4002         /* don't use group allocation for large files */
4003         if (size >= sbi->s_mb_stream_request)
4004                 return;
4005
4006         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4007                 return;
4008
4009         BUG_ON(ac->ac_lg != NULL);
4010         /*
4011          * locality group prealloc space is per-CPU. The reason for having
4012          * a per-CPU locality group is to reduce contention between block
4013          * requests from multiple CPUs.
4014          */
4015         ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
4016
4017         /* we're going to use group allocation */
4018         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4019
4020         /* serialize all allocations in the group */
4021         mutex_lock(&ac->ac_lg->lg_mutex);
4022 }
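
/*
 * Editor's worked example (hypothetical numbers): with 4KB blocks
 * (bsbits == 12), a write ending at logical block 10 of a 20KB file gives
 * size == 10 and isize == 20480 >> 12 == 5, so max(size, isize) == 10 is
 * below the default s_mb_stream_request of 16 and the allocation is routed
 * through the per-CPU locality group; a 1MB file would take the inode
 * preallocation path instead.
 */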
4023
4024 static noinline_for_stack int
4025 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4026                                 struct ext4_allocation_request *ar)
4027 {
4028         struct super_block *sb = ar->inode->i_sb;
4029         struct ext4_sb_info *sbi = EXT4_SB(sb);
4030         struct ext4_super_block *es = sbi->s_es;
4031         ext4_group_t group;
4032         unsigned int len;
4033         ext4_fsblk_t goal;
4034         ext4_grpblk_t block;
4035
4036         /* we can't allocate > group size */
4037         len = ar->len;
4038
4039         /* just a dirty hack to filter out overly large requests */
4040         if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4041                 len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4042
4043         /* start searching from the goal */
4044         goal = ar->goal;
4045         if (goal < le32_to_cpu(es->s_first_data_block) ||
4046                         goal >= ext4_blocks_count(es))
4047                 goal = le32_to_cpu(es->s_first_data_block);
4048         ext4_get_group_no_and_offset(sb, goal, &group, &block);
4049
4050         /* set up allocation goals */
4051         ac->ac_b_ex.fe_logical = ar->logical;
4052         ac->ac_b_ex.fe_group = 0;
4053         ac->ac_b_ex.fe_start = 0;
4054         ac->ac_b_ex.fe_len = 0;
4055         ac->ac_status = AC_STATUS_CONTINUE;
4056         ac->ac_groups_scanned = 0;
4057         ac->ac_ex_scanned = 0;
4058         ac->ac_found = 0;
4059         ac->ac_sb = sb;
4060         ac->ac_inode = ar->inode;
4061         ac->ac_o_ex.fe_logical = ar->logical;
4062         ac->ac_o_ex.fe_group = group;
4063         ac->ac_o_ex.fe_start = block;
4064         ac->ac_o_ex.fe_len = len;
4065         ac->ac_g_ex.fe_logical = ar->logical;
4066         ac->ac_g_ex.fe_group = group;
4067         ac->ac_g_ex.fe_start = block;
4068         ac->ac_g_ex.fe_len = len;
4069         ac->ac_f_ex.fe_len = 0;
4070         ac->ac_flags = ar->flags;
4071         ac->ac_2order = 0;
4072         ac->ac_criteria = 0;
4073         ac->ac_pa = NULL;
4074         ac->ac_bitmap_page = NULL;
4075         ac->ac_buddy_page = NULL;
4076         ac->ac_lg = NULL;
4077
4078         /* we have to define the context: will we work with a file or a
4079          * locality group? this is a policy, actually */
4080         ext4_mb_group_or_file(ac);
4081
4082         mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4083                         "left: %u/%u, right %u/%u to %swritable\n",
4084                         (unsigned) ar->len, (unsigned) ar->logical,
4085                         (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4086                         (unsigned) ar->lleft, (unsigned) ar->pleft,
4087                         (unsigned) ar->lright, (unsigned) ar->pright,
4088                         atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4089         return 0;
4090
4091 }
4092
4093 static noinline_for_stack void
4094 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4095                                         struct ext4_locality_group *lg,
4096                                         int order, int total_entries)
4097 {
4098         ext4_group_t group = 0;
4099         struct ext4_buddy e4b;
4100         struct list_head discard_list;
4101         struct ext4_prealloc_space *pa, *tmp;
4102         struct ext4_allocation_context *ac;
4103
4104         mb_debug("discard locality group preallocation\n");
4105
4106         INIT_LIST_HEAD(&discard_list);
4107         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4108
4109         spin_lock(&lg->lg_prealloc_lock);
4110         list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4111                                                 pa_inode_list) {
4112                 spin_lock(&pa->pa_lock);
4113                 if (atomic_read(&pa->pa_count)) {
4114                         /*
4115                          * This is the pa that we just used
4116                          * for block allocation. So don't
4117                          * free it
4118                          */
4119                         spin_unlock(&pa->pa_lock);
4120                         continue;
4121                 }
4122                 if (pa->pa_deleted) {
4123                         spin_unlock(&pa->pa_lock);
4124                         continue;
4125                 }
4126                 /* only lg prealloc space */
4127                 BUG_ON(!pa->pa_linear);
4128
4129                 /* seems this one can be freed ... */
4130                 pa->pa_deleted = 1;
4131                 spin_unlock(&pa->pa_lock);
4132
4133                 list_del_rcu(&pa->pa_inode_list);
4134                 list_add(&pa->u.pa_tmp_list, &discard_list);
4135
4136                 total_entries--;
4137                 if (total_entries <= 5) {
4138                         /*
4139                          * we want to keep only 5 entries,
4140                          * allowing the list to grow back to 8.
4141                          * This makes sure we don't have to call
4142                          * discard again soon for this list.
4143                          */
4144                         break;
4145                 }
4146         }
4147         spin_unlock(&lg->lg_prealloc_lock);
4148
4149         list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4151                 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4152                 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4153                         ext4_error(sb, __func__, "Error in loading buddy "
4154                                         "information for %u", group);
4155                         continue;
4156                 }
4157                 ext4_lock_group(sb, group);
4158                 list_del(&pa->pa_group_list);
4159                 ext4_mb_release_group_pa(&e4b, pa, ac);
4160                 ext4_unlock_group(sb, group);
4161
4162                 ext4_mb_release_desc(&e4b);
4163                 list_del(&pa->u.pa_tmp_list);
4164                 call_rcu(&pa->u.pa_rcu, ext4_mb_pa_callback);
4165         }
4166         if (ac)
4167                 kmem_cache_free(ext4_ac_cachep, ac);
4168 }
4169
4170 /*
4171  * We have incremented pa_count. So it cannot be freed at this
4172  * point. Also we hold lg_mutex. So no parallel allocation is
4173  * possible from this lg. That means pa_free cannot be updated.
4174  *
4175  * A parallel ext4_mb_discard_group_preallocations is possible,
4176  * which can cause the lg_prealloc_list to be updated.
4177  */
4178
4179 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4180 {
4181         int order, added = 0, lg_prealloc_count = 1;
4182         struct super_block *sb = ac->ac_sb;
4183         struct ext4_locality_group *lg = ac->ac_lg;
4184         struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4185
4186         order = fls(pa->pa_free) - 1;
4187         if (order > PREALLOC_TB_SIZE - 1)
4188                 /* The max size of hash table is PREALLOC_TB_SIZE */
4189                 order = PREALLOC_TB_SIZE - 1;
4190         /* Add the prealloc space to lg */
4191         rcu_read_lock();
4192         list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4193                                                 pa_inode_list) {
4194                 spin_lock(&tmp_pa->pa_lock);
4195                 if (tmp_pa->pa_deleted) {
4196                         spin_unlock(&tmp_pa->pa_lock);
4197                         continue;
4198                 }
4199                 if (!added && pa->pa_free < tmp_pa->pa_free) {
4200                         /* Add to the tail of the previous entry */
4201                         list_add_tail_rcu(&pa->pa_inode_list,
4202                                                 &tmp_pa->pa_inode_list);
4203                         added = 1;
4204                         /*
4205                          * keep scanning so lg_prealloc_count
4206                          * counts every entry in the list
4207                          */
4208                 }
4209                 spin_unlock(&tmp_pa->pa_lock);
4210                 lg_prealloc_count++;
4211         }
4212         if (!added)
4213                 list_add_tail_rcu(&pa->pa_inode_list,
4214                                         &lg->lg_prealloc_list[order]);
4215         rcu_read_unlock();
4216
4217         /* Now trim the list so it holds no more than 8 elements */
4218         if (lg_prealloc_count > 8) {
4219                 ext4_mb_discard_lg_preallocations(sb, lg,
4220                                                 order, lg_prealloc_count);
4221         }
4222 }
4225
4226 /*
4227  * release all resources we used in the allocation
4228  */
4229 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4230 {
4231         struct ext4_prealloc_space *pa = ac->ac_pa;
4232         if (pa) {
4233                 if (pa->pa_linear) {
4234                         /* see comment in ext4_mb_use_group_pa() */
4235                         spin_lock(&pa->pa_lock);
4236                         pa->pa_pstart += ac->ac_b_ex.fe_len;
4237                         pa->pa_lstart += ac->ac_b_ex.fe_len;
4238                         pa->pa_free -= ac->ac_b_ex.fe_len;
4239                         pa->pa_len -= ac->ac_b_ex.fe_len;
4240                         spin_unlock(&pa->pa_lock);
4241                         /*
4242                          * We want to add the pa to the right bucket.
4243                          * Remove it from the list and while adding
4244                          * make sure the list to which we are adding
4245                          * doesn't grow big.
4246                          */
4247                         if (likely(pa->pa_free)) {
4248                                 spin_lock(pa->pa_obj_lock);
4249                                 list_del_rcu(&pa->pa_inode_list);
4250                                 spin_unlock(pa->pa_obj_lock);
4251                                 ext4_mb_add_n_trim(ac);
4252                         }
4253                 }
4254                 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4255         }
4256         if (ac->ac_bitmap_page)
4257                 page_cache_release(ac->ac_bitmap_page);
4258         if (ac->ac_buddy_page)
4259                 page_cache_release(ac->ac_buddy_page);
4260         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4261                 mutex_unlock(&ac->ac_lg->lg_mutex);
4262         ext4_mb_collect_stats(ac);
4263         return 0;
4264 }
4265
4266 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4267 {
4268         ext4_group_t i;
4269         int ret;
4270         int freed = 0;
4271
4272         for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4273                 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4274                 freed += ret;
4275                 needed -= ret;
4276         }
4277
4278         return freed;
4279 }
4280
4281 /*
4282  * Main entry point into mballoc to allocate blocks
4283  * it tries to use preallocation first, then falls back
4284  * to usual allocation
4285  */
4286 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4287                                  struct ext4_allocation_request *ar, int *errp)
4288 {
4289         int freed;
4290         struct ext4_allocation_context *ac = NULL;
4291         struct ext4_sb_info *sbi;
4292         struct super_block *sb;
4293         ext4_fsblk_t block = 0;
4294         unsigned int inquota;
4295         unsigned int reserv_blks = 0;
4296
4297         sb = ar->inode->i_sb;
4298         sbi = EXT4_SB(sb);
4299
4300         if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
4301                 /*
4302                  * With delalloc we already reserved the blocks
4303                  */
4304                 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4305                         /* let others free up some space */
4306                         yield();
4307                         ar->len = ar->len >> 1;
4308                 }
4309                 if (!ar->len) {
4310                         *errp = -ENOSPC;
4311                         return 0;
4312                 }
4313                 reserv_blks = ar->len;
4314         }
4315         while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4316                 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4317                 ar->len--;
4318         }
4319         if (ar->len == 0) {
4320                 *errp = -EDQUOT;
4321                 return 0;
4322         }
4323         inquota = ar->len;
4324
4325         if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4326                 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4327
4328         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4329         if (!ac) {
4330                 ar->len = 0;
4331                 *errp = -ENOMEM;
4332                 goto out1;
4333         }
4334
4335         *errp = ext4_mb_initialize_context(ac, ar);
4336         if (*errp) {
4337                 ar->len = 0;
4338                 goto out2;
4339         }
4340
4341         ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4342         if (!ext4_mb_use_preallocated(ac)) {
4343                 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4344                 ext4_mb_normalize_request(ac, ar);
4345 repeat:
4346                 /* allocate space in core */
4347                 ext4_mb_regular_allocator(ac);
4348
4349                 /* as we've just preallocated more space than
4350                  * the user originally requested, we store the
4351                  * allocated space in a special descriptor */
4352                 if (ac->ac_status == AC_STATUS_FOUND &&
4353                                 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4354                         ext4_mb_new_preallocation(ac);
4355         }
4356
4357         if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4358                 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4359                 if (*errp ==  -EAGAIN) {
4360                         ac->ac_b_ex.fe_group = 0;
4361                         ac->ac_b_ex.fe_start = 0;
4362                         ac->ac_b_ex.fe_len = 0;
4363                         ac->ac_status = AC_STATUS_CONTINUE;
4364                         goto repeat;
4365                 } else if (*errp) {
4366                         ac->ac_b_ex.fe_len = 0;
4367                         ar->len = 0;
4368                         ext4_mb_show_ac(ac);
4369                 } else {
4370                         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4371                         ar->len = ac->ac_b_ex.fe_len;
4372                 }
4373         } else {
4374                 freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4375                 if (freed)
4376                         goto repeat;
4377                 *errp = -ENOSPC;
4378                 ac->ac_b_ex.fe_len = 0;
4379                 ar->len = 0;
4380                 ext4_mb_show_ac(ac);
4381         }
4382
4383         ext4_mb_release_context(ac);
4384
4385 out2:
4386         kmem_cache_free(ext4_ac_cachep, ac);
4387 out1:
4388         if (ar->len < inquota)
4389                 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4390
4391         return block;
4392 }
4393
4394 /*
4395  * We can merge two free data extents only if the physical blocks
4396  * are contiguous, AND the extents were freed by the same transaction,
4397  * AND the blocks are associated with the same group.
4398  */
4399 static int can_merge(struct ext4_free_data *entry1,
4400                         struct ext4_free_data *entry2)
4401 {
4402         if ((entry1->t_tid == entry2->t_tid) &&
4403             (entry1->group == entry2->group) &&
4404             ((entry1->start_blk + entry1->count) == entry2->start_blk))
4405                 return 1;
4406         return 0;
4407 }
4408
4409 static noinline_for_stack int
4410 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4411                           ext4_group_t group, ext4_grpblk_t block, int count)
4412 {
4413         struct ext4_group_info *db = e4b->bd_info;
4414         struct super_block *sb = e4b->bd_sb;
4415         struct ext4_sb_info *sbi = EXT4_SB(sb);
4416         struct ext4_free_data *entry, *new_entry;
4417         struct rb_node **n = &db->bb_free_root.rb_node, *node;
4418         struct rb_node *parent = NULL, *new_node;
4419
4420         BUG_ON(!ext4_handle_valid(handle));
4421         BUG_ON(e4b->bd_bitmap_page == NULL);
4422         BUG_ON(e4b->bd_buddy_page == NULL);
4423
4424         new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4425         if (!new_entry)
4426                 return -ENOMEM;
4427         new_entry->start_blk = block;
4426         new_entry->group  = group;
4427         new_entry->count = count;
4428         new_entry->t_tid = handle->h_transaction->t_tid;
4429         new_node = &new_entry->node;
4430
4431         ext4_lock_group(sb, group);
4432         if (!*n) {
4433                 /* first free block extent. We need to
4434                  * protect the buddy cache from being freed,
4435                  * otherwise we'll refresh it from the
4436                  * on-disk bitmap and lose not-yet-available
4437                  * blocks */
4438                 page_cache_get(e4b->bd_buddy_page);
4439                 page_cache_get(e4b->bd_bitmap_page);
4440         }
4441         while (*n) {
4442                 parent = *n;
4443                 entry = rb_entry(parent, struct ext4_free_data, node);
4444                 if (block < entry->start_blk)
4445                         n = &(*n)->rb_left;
4446                 else if (block >= (entry->start_blk + entry->count))
4447                         n = &(*n)->rb_right;
4448                 else {
4449                         ext4_unlock_group(sb, group);
4450                         ext4_error(sb, __func__,
4451                             "Double free of blocks %d (%d %d)",
4452                             block, entry->start_blk, entry->count);
4453                         return 0;
4454                 }
4455         }
4456
4457         rb_link_node(new_node, parent, n);
4458         rb_insert_color(new_node, &db->bb_free_root);
4459
4460         /* Now see whether the extent can be merged to the left and right */
4461         node = rb_prev(new_node);
4462         if (node) {
4463                 entry = rb_entry(node, struct ext4_free_data, node);
4464                 if (can_merge(entry, new_entry)) {
4465                         new_entry->start_blk = entry->start_blk;
4466                         new_entry->count += entry->count;
4467                         rb_erase(node, &(db->bb_free_root));
4468                         spin_lock(&sbi->s_md_lock);
4469                         list_del(&entry->list);
4470                         spin_unlock(&sbi->s_md_lock);
4471                         kmem_cache_free(ext4_free_ext_cachep, entry);
4472                 }
4473         }
4474
4475         node = rb_next(new_node);
4476         if (node) {
4477                 entry = rb_entry(node, struct ext4_free_data, node);
4478                 if (can_merge(new_entry, entry)) {
4479                         new_entry->count += entry->count;
4480                         rb_erase(node, &(db->bb_free_root));
4481                         spin_lock(&sbi->s_md_lock);
4482                         list_del(&entry->list);
4483                         spin_unlock(&sbi->s_md_lock);
4484                         kmem_cache_free(ext4_free_ext_cachep, entry);
4485                 }
4486         }
4487         /* Add the extent to the transaction's private list */
4488         spin_lock(&sbi->s_md_lock);
4489         list_add(&new_entry->list, &handle->h_transaction->t_private_list);
4490         spin_unlock(&sbi->s_md_lock);
4491         ext4_unlock_group(sb, group);
4492         return 0;
4493 }
4494
4495 /*
4496  * Main entry point into mballoc to free blocks
4497  */
4498 void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4499                         unsigned long block, unsigned long count,
4500                         int metadata, unsigned long *freed)
4501 {
4502         struct buffer_head *bitmap_bh = NULL;
4503         struct super_block *sb = inode->i_sb;
4504         struct ext4_allocation_context *ac = NULL;
4505         struct ext4_group_desc *gdp;
4506         struct ext4_super_block *es;
4507         unsigned int overflow;
4508         ext4_grpblk_t bit;
4509         struct buffer_head *gd_bh;
4510         ext4_group_t block_group;
4511         struct ext4_sb_info *sbi;
4512         struct ext4_buddy e4b;
4513         int err = 0;
4514         int ret;
4515
4516         *freed = 0;
4517
4518         sbi = EXT4_SB(sb);
4519         es = EXT4_SB(sb)->s_es;
4520         if (block < le32_to_cpu(es->s_first_data_block) ||
4521             block + count < block ||
4522             block + count > ext4_blocks_count(es)) {
4523                 ext4_error(sb, __func__,
4524                             "Freeing blocks not in datazone - "
4525                             "block = %lu, count = %lu", block, count);
4526                 goto error_return;
4527         }
4528
4529         ext4_debug("freeing block %lu\n", block);
4530
4531         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4532         if (ac) {
4533                 ac->ac_op = EXT4_MB_HISTORY_FREE;
4534                 ac->ac_inode = inode;
4535                 ac->ac_sb = sb;
4536         }
4537
4538 do_more:
4539         overflow = 0;
4540         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4541
4542         /*
4543          * Check to see if we are freeing blocks across a group
4544          * boundary.
4545          */
4546         if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4547                 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4548                 count -= overflow;
4549         }
4550         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4551         if (!bitmap_bh) {
4552                 err = -EIO;
4553                 goto error_return;
4554         }
4555         gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4556         if (!gdp) {
4557                 err = -EIO;
4558                 goto error_return;
4559         }
4560
4561         if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4562             in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4563             in_range(block, ext4_inode_table(sb, gdp),
4564                       EXT4_SB(sb)->s_itb_per_group) ||
4565             in_range(block + count - 1, ext4_inode_table(sb, gdp),
4566                       EXT4_SB(sb)->s_itb_per_group)) {
4567
4568                 ext4_error(sb, __func__,
4569                            "Freeing blocks in system zone - "
4570                            "Block = %lu, count = %lu", block, count);
4571                 /* err = 0. ext4_std_error should be a no-op */
4572                 goto error_return;
4573         }
4574
4575         BUFFER_TRACE(bitmap_bh, "getting write access");
4576         err = ext4_journal_get_write_access(handle, bitmap_bh);
4577         if (err)
4578                 goto error_return;
4579
4580         /*
4581          * We are about to modify some metadata.  Call the journal APIs
4582          * to unshare ->b_data if a currently-committing transaction is
4583          * using it
4584          */
4585         BUFFER_TRACE(gd_bh, "get_write_access");
4586         err = ext4_journal_get_write_access(handle, gd_bh);
4587         if (err)
4588                 goto error_return;
4589
4590         err = ext4_mb_load_buddy(sb, block_group, &e4b);
4591         if (err)
4592                 goto error_return;
4593
4594 #ifdef AGGRESSIVE_CHECK
4595         {
4596                 int i;
4597                 for (i = 0; i < count; i++)
4598                         BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4599         }
4600 #endif
4601         mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4602                         bit, count);
4603
4604         /* We dirtied the bitmap block */
4605         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4606         err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4607
4608         if (ac) {
4609                 ac->ac_b_ex.fe_group = block_group;
4610                 ac->ac_b_ex.fe_start = bit;
4611                 ac->ac_b_ex.fe_len = count;
4612                 ext4_mb_store_history(ac);
4613         }
4614
4615         if (metadata && ext4_handle_valid(handle)) {
4616                 /* the blocks being freed are metadata; they shouldn't
4617                  * be reused until this transaction is committed */
4618                 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
4619         } else {
4620                 ext4_lock_group(sb, block_group);
4621                 mb_free_blocks(inode, &e4b, bit, count);
4622                 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4623                 ext4_unlock_group(sb, block_group);
4624         }
4625
4626         spin_lock(sb_bgl_lock(sbi, block_group));
4627         le16_add_cpu(&gdp->bg_free_blocks_count, count);
4628         gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4629         spin_unlock(sb_bgl_lock(sbi, block_group));
4630         percpu_counter_add(&sbi->s_freeblocks_counter, count);
4631
4632         if (sbi->s_log_groups_per_flex) {
4633                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4634                 spin_lock(sb_bgl_lock(sbi, flex_group));
4635                 sbi->s_flex_groups[flex_group].free_blocks += count;
4636                 spin_unlock(sb_bgl_lock(sbi, flex_group));
4637         }
4638
4639         ext4_mb_release_desc(&e4b);
4640
4641         *freed += count;
4642
4643         /* And the group descriptor block */
4644         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4645         ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4646         if (!err)
4647                 err = ret;
4648
4649         if (overflow && !err) {
4650                 block += count;
4651                 count = overflow;
4652                 put_bh(bitmap_bh);
4653                 goto do_more;
4654         }
4655         sb->s_dirt = 1;
4656 error_return:
4657         brelse(bitmap_bh);
4658         ext4_std_error(sb, err);
4659         if (ac)
4660                 kmem_cache_free(ext4_ac_cachep, ac);
4661         return;
4662 }