X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=fs%2Fext4%2Fmballoc.c;h=4865d318d7e1631c871e0985dfe66a427976275c;hb=e8546d0615542684ca02ba03edebec1a503beb6b;hp=76e5fedc0a0b4c7e581852d9eeab4374ba246a24;hpb=c1f3ee120bb61045b1c0a3ead620d1d65af47130;p=linux-2.6-omap-h63xx.git diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 76e5fedc0a0..4865d318d7e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -420,6 +420,7 @@ #define MB_DEFAULT_GROUP_PREALLOC 512 static struct kmem_cache *ext4_pspace_cachep; +static struct kmem_cache *ext4_ac_cachep; #ifdef EXT4_BB_MAX_BLOCKS #undef EXT4_BB_MAX_BLOCKS @@ -626,21 +627,19 @@ static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, return block; } +static inline void *mb_correct_addr_and_bit(int *bit, void *addr) +{ #if BITS_PER_LONG == 64 -#define mb_correct_addr_and_bit(bit, addr) \ -{ \ - bit += ((unsigned long) addr & 7UL) << 3; \ - addr = (void *) ((unsigned long) addr & ~7UL); \ -} + *bit += ((unsigned long) addr & 7UL) << 3; + addr = (void *) ((unsigned long) addr & ~7UL); #elif BITS_PER_LONG == 32 -#define mb_correct_addr_and_bit(bit, addr) \ -{ \ - bit += ((unsigned long) addr & 3UL) << 3; \ - addr = (void *) ((unsigned long) addr & ~3UL); \ -} + *bit += ((unsigned long) addr & 3UL) << 3; + addr = (void *) ((unsigned long) addr & ~3UL); #else #error "how many bits you are?!" #endif + return addr; +} static inline int mb_test_bit(int bit, void *addr) { @@ -648,39 +647,58 @@ static inline int mb_test_bit(int bit, void *addr) * ext4_test_bit on architecture like powerpc * needs unsigned long aligned address */ - mb_correct_addr_and_bit(bit, addr); + addr = mb_correct_addr_and_bit(&bit, addr); return ext4_test_bit(bit, addr); } static inline void mb_set_bit(int bit, void *addr) { - mb_correct_addr_and_bit(bit, addr); + addr = mb_correct_addr_and_bit(&bit, addr); ext4_set_bit(bit, addr); } static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr) { - mb_correct_addr_and_bit(bit, addr); + addr = mb_correct_addr_and_bit(&bit, addr); ext4_set_bit_atomic(lock, bit, addr); } static inline void mb_clear_bit(int bit, void *addr) { - mb_correct_addr_and_bit(bit, addr); + addr = mb_correct_addr_and_bit(&bit, addr); ext4_clear_bit(bit, addr); } static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr) { - mb_correct_addr_and_bit(bit, addr); + addr = mb_correct_addr_and_bit(&bit, addr); ext4_clear_bit_atomic(lock, bit, addr); } +static inline int mb_find_next_zero_bit(void *addr, int max, int start) +{ + int fix = 0; + addr = mb_correct_addr_and_bit(&fix, addr); + max += fix; + start += fix; + + return ext4_find_next_zero_bit(addr, max, start) - fix; +} + +static inline int mb_find_next_bit(void *addr, int max, int start) +{ + int fix = 0; + addr = mb_correct_addr_and_bit(&fix, addr); + max += fix; + start += fix; + + return ext4_find_next_bit(addr, max, start) - fix; +} + static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) { char *bb; - /* FIXME!! 
is this needed */ BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b)); BUG_ON(max == NULL); @@ -906,7 +924,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, unsigned short chunk; unsigned short border; - BUG_ON(len >= EXT4_BLOCKS_PER_GROUP(sb)); + BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb)); border = 2 << sb->s_blocksize_bits; @@ -946,12 +964,12 @@ static void ext4_mb_generate_buddy(struct super_block *sb, /* initialize buddy from bitmap which is aggregation * of on-disk bitmap and preallocations */ - i = ext4_find_next_zero_bit(bitmap, max, 0); + i = mb_find_next_zero_bit(bitmap, max, 0); grp->bb_first_free = i; while (i < max) { fragments++; first = i; - i = ext4_find_next_bit(bitmap, max, i); + i = mb_find_next_bit(bitmap, max, i); len = i - first; free += len; if (len > 1) @@ -959,14 +977,18 @@ static void ext4_mb_generate_buddy(struct super_block *sb, else grp->bb_counters[0]++; if (i < max) - i = ext4_find_next_zero_bit(bitmap, max, i); + i = mb_find_next_zero_bit(bitmap, max, i); } grp->bb_fragments = fragments; if (free != grp->bb_free) { - printk(KERN_DEBUG + ext4_error(sb, __FUNCTION__, "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", group, free, grp->bb_free); + /* + * If we intent to continue, we consider group descritor + * corrupt and update bb_free using bitmap value + */ grp->bb_free = free; } @@ -1146,8 +1168,9 @@ out: return err; } -static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, - struct ext4_buddy *e4b) +static noinline_for_stack int +ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + struct ext4_buddy *e4b) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct inode *inode = sbi->s_buddy_cache; @@ -1778,7 +1801,7 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, buddy = mb_find_buddy(e4b, i, &max); BUG_ON(buddy == NULL); - k = ext4_find_next_zero_bit(buddy, max, 0); + k = mb_find_next_zero_bit(buddy, max, 0); BUG_ON(k >= max); ac->ac_found++; @@ -1818,16 +1841,33 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, i = e4b->bd_info->bb_first_free; while (free && ac->ac_status == AC_STATUS_CONTINUE) { - i = ext4_find_next_zero_bit(bitmap, + i = mb_find_next_zero_bit(bitmap, EXT4_BLOCKS_PER_GROUP(sb), i); if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { - BUG_ON(free != 0); + /* + * IF we have corrupt bitmap, we won't find any + * free blocks even though group info says we + * we have free blocks + */ + ext4_error(sb, __FUNCTION__, "%d free blocks as per " + "group info. But bitmap says 0\n", + free); break; } mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex); BUG_ON(ex.fe_len <= 0); - BUG_ON(free < ex.fe_len); + if (free < ex.fe_len) { + ext4_error(sb, __FUNCTION__, "%d free blocks as per " + "group info. But got %d blocks\n", + free, ex.fe_len); + /* + * The number of free blocks differs. This mostly + * indicate that the bitmap is corrupt. So exit + * without claiming the space. 
+ */ + break; + } ext4_mb_measure_extent(ac, &ex, e4b); @@ -1926,7 +1966,8 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac, return 0; } -static int ext4_mb_regular_allocator(struct ext4_allocation_context *ac) +static noinline_for_stack int +ext4_mb_regular_allocator(struct ext4_allocation_context *ac) { ext4_group_t group; ext4_group_t i; @@ -2410,17 +2451,10 @@ static void ext4_mb_history_init(struct super_block *sb) int i; if (sbi->s_mb_proc != NULL) { - struct proc_dir_entry *p; - p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc); - if (p) { - p->proc_fops = &ext4_mb_seq_history_fops; - p->data = sb; - } - p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc); - if (p) { - p->proc_fops = &ext4_mb_seq_groups_fops; - p->data = sb; - } + proc_create_data("mb_history", S_IRUGO, sbi->s_mb_proc, + &ext4_mb_seq_history_fops, sb); + proc_create_data("mb_groups", S_IRUGO, sbi->s_mb_proc, + &ext4_mb_seq_groups_fops, sb); } sbi->s_mb_history_max = 1000; @@ -2433,7 +2467,8 @@ static void ext4_mb_history_init(struct super_block *sb) /* if we can't allocate history, then we simple won't use it */ } -static void ext4_mb_store_history(struct ext4_allocation_context *ac) +static noinline_for_stack void +ext4_mb_store_history(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_mb_history h; @@ -2769,7 +2804,8 @@ int ext4_mb_release(struct super_block *sb) return 0; } -static void ext4_mb_free_committed_blocks(struct super_block *sb) +static noinline_for_stack void +ext4_mb_free_committed_blocks(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); int err; @@ -2828,7 +2864,6 @@ static void ext4_mb_free_committed_blocks(struct super_block *sb) mb_debug("freed %u blocks in %u structures\n", count, count2); } -#define EXT4_ROOT "ext4" #define EXT4_MB_STATS_NAME "stats" #define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan" #define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan" @@ -2959,12 +2994,19 @@ int __init init_ext4_mballoc(void) if (ext4_pspace_cachep == NULL) return -ENOMEM; + ext4_ac_cachep = + kmem_cache_create("ext4_alloc_context", + sizeof(struct ext4_allocation_context), + 0, SLAB_RECLAIM_ACCOUNT, NULL); + if (ext4_ac_cachep == NULL) { + kmem_cache_destroy(ext4_pspace_cachep); + return -ENOMEM; + } #ifdef CONFIG_PROC_FS - proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs); + proc_root_ext4 = proc_mkdir("fs/ext4", NULL); if (proc_root_ext4 == NULL) - printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT); + printk(KERN_ERR "EXT4-fs: Unable to create fs/ext4\n"); #endif - return 0; } @@ -2972,8 +3014,9 @@ void exit_ext4_mballoc(void) { /* XXX: synchronize_rcu(); */ kmem_cache_destroy(ext4_pspace_cachep); + kmem_cache_destroy(ext4_ac_cachep); #ifdef CONFIG_PROC_FS - remove_proc_entry(EXT4_ROOT, proc_root_fs); + remove_proc_entry("fs/ext4", NULL); #endif } @@ -2982,7 +3025,8 @@ void exit_ext4_mballoc(void) * Check quota and mark choosed space (ac->ac_b_ex) non-free in bitmaps * Returns 0 if success or error code */ -static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, +static noinline_for_stack int +ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, handle_t *handle) { struct buffer_head *bitmap_bh = NULL; @@ -3055,9 +3099,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, ac->ac_b_ex.fe_group, gdp)); } - gdp->bg_free_blocks_count = - cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - - ac->ac_b_ex.fe_len); + le16_add_cpu(&gdp->bg_free_blocks_count, 
-ac->ac_b_ex.fe_len); gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len); @@ -3069,7 +3111,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, out_err: sb->s_dirt = 1; - put_bh(bitmap_bh); + brelse(bitmap_bh); return err; } @@ -3099,15 +3141,16 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) * Normalization means making request better in terms of * size and alignment */ -static void ext4_mb_normalize_request(struct ext4_allocation_context *ac, +static noinline_for_stack void +ext4_mb_normalize_request(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) { int bsbits, max; ext4_lblk_t end; - struct list_head *cur; loff_t size, orig_size, start_off; ext4_lblk_t start, orig_start; struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); + struct ext4_prealloc_space *pa; /* do normalize only data requests, metadata requests do not need preallocation */ @@ -3193,12 +3236,9 @@ static void ext4_mb_normalize_request(struct ext4_allocation_context *ac, /* check we don't cross already preallocated blocks */ rcu_read_lock(); - list_for_each_rcu(cur, &ei->i_prealloc_list) { - struct ext4_prealloc_space *pa; + list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { unsigned long pa_end; - pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list); - if (pa->pa_deleted) continue; spin_lock(&pa->pa_lock); @@ -3240,10 +3280,8 @@ static void ext4_mb_normalize_request(struct ext4_allocation_context *ac, /* XXX: extra loop to check we really don't overlap preallocations */ rcu_read_lock(); - list_for_each_rcu(cur, &ei->i_prealloc_list) { - struct ext4_prealloc_space *pa; + list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { unsigned long pa_end; - pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list); spin_lock(&pa->pa_lock); if (pa->pa_deleted == 0) { pa_end = pa->pa_lstart + pa->pa_len; @@ -3354,13 +3392,10 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, ac->ac_pa = pa; /* we don't correct pa_pstart or pa_plen here to avoid - * possible race when tte group is being loaded concurrently + * possible race when the group is being loaded concurrently * instead we correct pa later, after blocks are marked - * in on-disk bitmap -- see ext4_mb_release_context() */ - /* - * FIXME!! but the other CPUs can look at this particular - * pa and think that it have enought free blocks if we - * don't update pa_free here right ? 
+ * in on-disk bitmap -- see ext4_mb_release_context() + * Other CPUs are prevented from allocating from this pa by lg_mutex */ mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); } @@ -3368,12 +3403,12 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, /* * search goal blocks in preallocated space */ -static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac) +static noinline_for_stack int +ext4_mb_use_preallocated(struct ext4_allocation_context *ac) { struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); struct ext4_locality_group *lg; struct ext4_prealloc_space *pa; - struct list_head *cur; /* only data can be preallocated */ if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) @@ -3381,8 +3416,7 @@ static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac) /* first, try per-file preallocation */ rcu_read_lock(); - list_for_each_rcu(cur, &ei->i_prealloc_list) { - pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list); + list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { /* all fields in this condition don't change, * so we can skip locking for them */ @@ -3414,8 +3448,7 @@ static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac) return 0; rcu_read_lock(); - list_for_each_rcu(cur, &lg->lg_prealloc_list) { - pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list); + list_for_each_entry_rcu(pa, &lg->lg_prealloc_list, pa_inode_list) { spin_lock(&pa->pa_lock); if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) { atomic_inc(&pa->pa_count); @@ -3535,7 +3568,8 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, /* * creates new preallocated space for given inode */ -static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) +static noinline_for_stack int +ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_prealloc_space *pa; @@ -3622,7 +3656,8 @@ static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) /* * creates new preallocated space for locality group inodes belongs to */ -static int ext4_mb_new_group_pa(struct ext4_allocation_context *ac) +static noinline_for_stack int +ext4_mb_new_group_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg; @@ -3695,11 +3730,11 @@ static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) * the caller MUST hold group/inode locks. 
* TODO: optimize the case when there are no in-core structures yet */ -static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, - struct buffer_head *bitmap_bh, +static noinline_for_stack int +ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, struct ext4_prealloc_space *pa) { - struct ext4_allocation_context ac; + struct ext4_allocation_context *ac; struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned long end; @@ -3715,15 +3750,19 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, BUG_ON(group != e4b->bd_group && pa->pa_len != 0); end = bit + pa->pa_len; - ac.ac_sb = sb; - ac.ac_inode = pa->pa_inode; - ac.ac_op = EXT4_MB_HISTORY_DISCARD; + ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); + + if (ac) { + ac->ac_sb = sb; + ac->ac_inode = pa->pa_inode; + ac->ac_op = EXT4_MB_HISTORY_DISCARD; + } while (bit < end) { - bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit); + bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); if (bit >= end) break; - next = ext4_find_next_bit(bitmap_bh->b_data, end, bit); + next = mb_find_next_bit(bitmap_bh->b_data, end, bit); if (next > end) next = end; start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + @@ -3733,37 +3772,49 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, (unsigned) group); free += next - bit; - ac.ac_b_ex.fe_group = group; - ac.ac_b_ex.fe_start = bit; - ac.ac_b_ex.fe_len = next - bit; - ac.ac_b_ex.fe_logical = 0; - ext4_mb_store_history(&ac); + if (ac) { + ac->ac_b_ex.fe_group = group; + ac->ac_b_ex.fe_start = bit; + ac->ac_b_ex.fe_len = next - bit; + ac->ac_b_ex.fe_logical = 0; + ext4_mb_store_history(ac); + } mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); bit = next + 1; } if (free != pa->pa_free) { - printk(KERN_ERR "pa %p: logic %lu, phys. %lu, len %lu\n", + printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n", pa, (unsigned long) pa->pa_lstart, (unsigned long) pa->pa_pstart, (unsigned long) pa->pa_len); - printk(KERN_ERR "free %u, pa_free %u\n", free, pa->pa_free); + ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n", + free, pa->pa_free); + /* + * pa is already deleted so we use the value obtained + * from the bitmap and continue. 
+ */ } - BUG_ON(free != pa->pa_free); atomic_add(free, &sbi->s_mb_discarded); + if (ac) + kmem_cache_free(ext4_ac_cachep, ac); return err; } -static int ext4_mb_release_group_pa(struct ext4_buddy *e4b, +static noinline_for_stack int +ext4_mb_release_group_pa(struct ext4_buddy *e4b, struct ext4_prealloc_space *pa) { - struct ext4_allocation_context ac; + struct ext4_allocation_context *ac; struct super_block *sb = e4b->bd_sb; ext4_group_t group; ext4_grpblk_t bit; - ac.ac_op = EXT4_MB_HISTORY_DISCARD; + ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); + + if (ac) + ac->ac_op = EXT4_MB_HISTORY_DISCARD; BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); @@ -3771,13 +3822,16 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b, mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); - ac.ac_sb = sb; - ac.ac_inode = NULL; - ac.ac_b_ex.fe_group = group; - ac.ac_b_ex.fe_start = bit; - ac.ac_b_ex.fe_len = pa->pa_len; - ac.ac_b_ex.fe_logical = 0; - ext4_mb_store_history(&ac); + if (ac) { + ac->ac_sb = sb; + ac->ac_inode = NULL; + ac->ac_b_ex.fe_group = group; + ac->ac_b_ex.fe_start = bit; + ac->ac_b_ex.fe_len = pa->pa_len; + ac->ac_b_ex.fe_logical = 0; + ext4_mb_store_history(ac); + kmem_cache_free(ext4_ac_cachep, ac); + } return 0; } @@ -3791,7 +3845,8 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b, * - how many do we discard * 1) how many requested */ -static int ext4_mb_discard_group_preallocations(struct super_block *sb, +static noinline_for_stack int +ext4_mb_discard_group_preallocations(struct super_block *sb, ext4_group_t group, int needed) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); @@ -4113,7 +4168,8 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) mutex_lock(&ac->ac_lg->lg_mutex); } -static int ext4_mb_initialize_context(struct ext4_allocation_context *ac, +static noinline_for_stack int +ext4_mb_initialize_context(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) { struct super_block *sb = ar->inode->i_sb; @@ -4231,7 +4287,7 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) { - struct ext4_allocation_context ac; + struct ext4_allocation_context *ac = NULL; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; @@ -4257,53 +4313,60 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, } inquota = ar->len; + ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); + if (!ac) { + *errp = -ENOMEM; + return 0; + } + ext4_mb_poll_new_transaction(sb, handle); - *errp = ext4_mb_initialize_context(&ac, ar); + *errp = ext4_mb_initialize_context(ac, ar); if (*errp) { ar->len = 0; goto out; } - ac.ac_op = EXT4_MB_HISTORY_PREALLOC; - if (!ext4_mb_use_preallocated(&ac)) { + ac->ac_op = EXT4_MB_HISTORY_PREALLOC; + if (!ext4_mb_use_preallocated(ac)) { - ac.ac_op = EXT4_MB_HISTORY_ALLOC; - ext4_mb_normalize_request(&ac, ar); + ac->ac_op = EXT4_MB_HISTORY_ALLOC; + ext4_mb_normalize_request(ac, ar); repeat: /* allocate space in core */ - ext4_mb_regular_allocator(&ac); + ext4_mb_regular_allocator(ac); /* as we've just preallocated more space than * user requested orinally, we store allocated * space in a special descriptor */ - if (ac.ac_status == AC_STATUS_FOUND && - ac.ac_o_ex.fe_len < ac.ac_b_ex.fe_len) - ext4_mb_new_preallocation(&ac); + if (ac->ac_status == AC_STATUS_FOUND && + 
ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) + ext4_mb_new_preallocation(ac); } - if (likely(ac.ac_status == AC_STATUS_FOUND)) { - ext4_mb_mark_diskspace_used(&ac, handle); + if (likely(ac->ac_status == AC_STATUS_FOUND)) { + ext4_mb_mark_diskspace_used(ac, handle); *errp = 0; - block = ext4_grp_offs_to_block(sb, &ac.ac_b_ex); - ar->len = ac.ac_b_ex.fe_len; + block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); + ar->len = ac->ac_b_ex.fe_len; } else { - freed = ext4_mb_discard_preallocations(sb, ac.ac_o_ex.fe_len); + freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); if (freed) goto repeat; *errp = -ENOSPC; - ac.ac_b_ex.fe_len = 0; + ac->ac_b_ex.fe_len = 0; ar->len = 0; - ext4_mb_show_ac(&ac); + ext4_mb_show_ac(ac); } - ext4_mb_release_context(&ac); + ext4_mb_release_context(ac); out: if (ar->len < inquota) DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); + kmem_cache_free(ext4_ac_cachep, ac); return block; } static void ext4_mb_poll_new_transaction(struct super_block *sb, @@ -4337,7 +4400,8 @@ static void ext4_mb_poll_new_transaction(struct super_block *sb, ext4_mb_free_committed_blocks(sb); } -static int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, +static noinline_for_stack int +ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, ext4_group_t group, ext4_grpblk_t block, int count) { struct ext4_group_info *db = e4b->bd_info; @@ -4405,9 +4469,9 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode, unsigned long block, unsigned long count, int metadata, unsigned long *freed) { - struct buffer_head *bitmap_bh = 0; + struct buffer_head *bitmap_bh = NULL; struct super_block *sb = inode->i_sb; - struct ext4_allocation_context ac; + struct ext4_allocation_context *ac = NULL; struct ext4_group_desc *gdp; struct ext4_super_block *es; unsigned long overflow; @@ -4436,9 +4500,12 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode, ext4_debug("freeing block %lu\n", block); - ac.ac_op = EXT4_MB_HISTORY_FREE; - ac.ac_inode = inode; - ac.ac_sb = sb; + ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); + if (ac) { + ac->ac_op = EXT4_MB_HISTORY_FREE; + ac->ac_inode = inode; + ac->ac_sb = sb; + } do_more: overflow = 0; @@ -4504,10 +4571,12 @@ do_more: BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); err = ext4_journal_dirty_metadata(handle, bitmap_bh); - ac.ac_b_ex.fe_group = block_group; - ac.ac_b_ex.fe_start = bit; - ac.ac_b_ex.fe_len = count; - ext4_mb_store_history(&ac); + if (ac) { + ac->ac_b_ex.fe_group = block_group; + ac->ac_b_ex.fe_start = bit; + ac->ac_b_ex.fe_len = count; + ext4_mb_store_history(ac); + } if (metadata) { /* blocks being freed are metadata. these blocks shouldn't @@ -4522,8 +4591,7 @@ do_more: } spin_lock(sb_bgl_lock(sbi, block_group)); - gdp->bg_free_blocks_count = - cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count); + le16_add_cpu(&gdp->bg_free_blocks_count, count); gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); spin_unlock(sb_bgl_lock(sbi, block_group)); percpu_counter_add(&sbi->s_freeblocks_counter, count); @@ -4548,5 +4616,7 @@ do_more: error_return: brelse(bitmap_bh); ext4_std_error(sb, err); + if (ac) + kmem_cache_free(ext4_ac_cachep, ac); return; }
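
The hunks above replace the old mb_correct_addr_and_bit() macro with an inline helper and route all bitmap scans through mb_find_next_zero_bit()/mb_find_next_bit(), which apply the same alignment fix-up to the start and max offsets and subtract it from the returned position. Below is a minimal standalone sketch (not part of the patch) of that fix-up: it assumes userspace C on a little-endian machine, and the helper names are illustrative only, not kernel APIs.

#include <stdio.h>

/* Round addr down to an unsigned-long boundary and grow *bit by the
 * number of bits skipped, mirroring what mb_correct_addr_and_bit()
 * does before calling the ext4_*_bit() primitives. */
static inline void *correct_addr_and_bit(int *bit, void *addr)
{
	*bit += ((unsigned long) addr & (sizeof(unsigned long) - 1)) << 3;
	return (void *) ((unsigned long) addr & ~(sizeof(unsigned long) - 1));
}

/* Little-endian test_bit over an unsigned-long array. */
static int test_bit_ul(int bit, const void *addr)
{
	const unsigned long *p = addr;
	int bpl = 8 * sizeof(unsigned long);

	return (p[bit / bpl] >> (bit % bpl)) & 1;
}

int main(void)
{
	unsigned long buf[2] = { 0 };
	/* A pointer 3 bytes into the buffer is not long-aligned. */
	void *misaligned = (unsigned char *) buf + 3;
	int bit = 5;            /* bit 5 of the misaligned view         */
	void *aligned;

	buf[0] |= 1UL << 29;    /* same bit, seen from the aligned base */

	aligned = correct_addr_and_bit(&bit, misaligned);
	/* Prints: bit=29 set=1 */
	printf("bit=%d set=%d\n", bit, test_bit_ul(bit, aligned));
	return 0;
}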