return err;
}
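+/*
+ * noinline_for_stack (a kernel alias for noinline) keeps each of these
+ * helpers in its own stack frame, so the large on-stack structures they
+ * declare are not all folded into a single caller frame when gcc would
+ * otherwise inline them.
+ */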
-static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
- struct ext4_buddy *e4b)
+static noinline_for_stack int
+ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ struct ext4_buddy *e4b)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct inode *inode = sbi->s_buddy_cache;
return 0;
}
-static int ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
ext4_group_t group;
ext4_group_t i;
	int cr;
if (sbi->s_mb_proc != NULL) {
- struct proc_dir_entry *p;
- p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
- if (p) {
- p->proc_fops = &ext4_mb_seq_history_fops;
- p->data = sb;
- }
- p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
- if (p) {
- p->proc_fops = &ext4_mb_seq_groups_fops;
- p->data = sb;
- }
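+		/*
+		 * proc_create_data() publishes each entry with its
+		 * proc_fops and ->data already set, closing the window in
+		 * which create_proc_entry() exposed an entry before its
+		 * fops were filled in.
+		 */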
+ proc_create_data("mb_history", S_IRUGO, sbi->s_mb_proc,
+ &ext4_mb_seq_history_fops, sb);
+ proc_create_data("mb_groups", S_IRUGO, sbi->s_mb_proc,
+ &ext4_mb_seq_groups_fops, sb);
}
sbi->s_mb_history_max = 1000;
 /* if we can't allocate history, then we simply won't use it */
}
-static void ext4_mb_store_history(struct ext4_allocation_context *ac)
+static noinline_for_stack void
+ext4_mb_store_history(struct ext4_allocation_context *ac)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_mb_history h;
return 0;
}
-static void ext4_mb_free_committed_blocks(struct super_block *sb)
+static noinline_for_stack void
+ext4_mb_free_committed_blocks(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
int err;
mb_debug("freed %u blocks in %u structures\n", count, count2);
}
-#define EXT4_ROOT "ext4"
#define EXT4_MB_STATS_NAME "stats"
#define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan"
#define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan"
return -ENOMEM;
}
#ifdef CONFIG_PROC_FS
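+	/*
+	 * With a NULL parent, proc_mkdir() resolves the name against the
+	 * /proc root, so "fs/ext4" is created under the existing /proc/fs
+	 * directory without needing the proc_root_fs symbol; the matching
+	 * remove_proc_entry("fs/ext4", NULL) below follows the same rule.
+	 */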
- proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs);
+ proc_root_ext4 = proc_mkdir("fs/ext4", NULL);
if (proc_root_ext4 == NULL)
- printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT);
+ printk(KERN_ERR "EXT4-fs: Unable to create fs/ext4\n");
#endif
return 0;
}
kmem_cache_destroy(ext4_pspace_cachep);
kmem_cache_destroy(ext4_ac_cachep);
#ifdef CONFIG_PROC_FS
- remove_proc_entry(EXT4_ROOT, proc_root_fs);
+ remove_proc_entry("fs/ext4", NULL);
#endif
}
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
* Returns 0 if success or error code
*/
-static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+static noinline_for_stack int
+ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
handle_t *handle)
{
struct buffer_head *bitmap_bh = NULL;
ac->ac_b_ex.fe_group,
gdp));
}
- gdp->bg_free_blocks_count =
- cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
- - ac->ac_b_ex.fe_len);
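+	/*
+	 * le16_add_cpu(&v, n) is equivalent to
+	 * v = cpu_to_le16(le16_to_cpu(v) + n), so the open-coded
+	 * byte-order dance above collapses into one helper call.
+	 */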
+ le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
 * Normalization means making the request better in terms of
* size and alignment
*/
-static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+static noinline_for_stack void
+ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_allocation_request *ar)
{
int bsbits, max;
ext4_lblk_t end;
- struct list_head *cur;
loff_t size, orig_size, start_off;
ext4_lblk_t start, orig_start;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+ struct ext4_prealloc_space *pa;
	/* only normalize data requests; metadata requests
do not need preallocation */
/* check we don't cross already preallocated blocks */
rcu_read_lock();
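+	/*
+	 * list_for_each_entry_rcu() hands back the typed entry directly
+	 * (still under rcu_read_lock()), so the struct list_head cursor
+	 * and the per-iteration list_entry() unpacking go away; the entry
+	 * pointer is hoisted to the top of the function instead.
+	 */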
- list_for_each_rcu(cur, &ei->i_prealloc_list) {
- struct ext4_prealloc_space *pa;
+ list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
unsigned long pa_end;
- pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
-
if (pa->pa_deleted)
continue;
spin_lock(&pa->pa_lock);
/* XXX: extra loop to check we really don't overlap preallocations */
rcu_read_lock();
- list_for_each_rcu(cur, &ei->i_prealloc_list) {
- struct ext4_prealloc_space *pa;
+ list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
unsigned long pa_end;
- pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
spin_lock(&pa->pa_lock);
if (pa->pa_deleted == 0) {
pa_end = pa->pa_lstart + pa->pa_len;
/*
* search goal blocks in preallocated space
*/
-static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_locality_group *lg;
struct ext4_prealloc_space *pa;
- struct list_head *cur;
/* only data can be preallocated */
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
/* first, try per-file preallocation */
rcu_read_lock();
- list_for_each_rcu(cur, &ei->i_prealloc_list) {
- pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
+ list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
/* all fields in this condition don't change,
* so we can skip locking for them */
return 0;
rcu_read_lock();
- list_for_each_rcu(cur, &lg->lg_prealloc_list) {
- pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
+ list_for_each_entry_rcu(pa, &lg->lg_prealloc_list, pa_inode_list) {
spin_lock(&pa->pa_lock);
if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
atomic_inc(&pa->pa_count);
/*
 * creates new preallocated space for the given inode
*/
-static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
struct ext4_prealloc_space *pa;
/*
 * creates new preallocated space for the locality group the inode belongs to
*/
-static int ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
struct ext4_locality_group *lg;
* the caller MUST hold group/inode locks.
* TODO: optimize the case when there are no in-core structures yet
*/
-static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
- struct buffer_head *bitmap_bh,
+static noinline_for_stack int
+ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
struct ext4_prealloc_space *pa)
{
struct ext4_allocation_context *ac;
return err;
}
-static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+static noinline_for_stack int
+ext4_mb_release_group_pa(struct ext4_buddy *e4b,
struct ext4_prealloc_space *pa)
{
struct ext4_allocation_context *ac;
* - how many do we discard
* 1) how many requested
*/
-static int ext4_mb_discard_group_preallocations(struct super_block *sb,
+static noinline_for_stack int
+ext4_mb_discard_group_preallocations(struct super_block *sb,
ext4_group_t group, int needed)
{
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
mutex_lock(&ac->ac_lg->lg_mutex);
}
-static int ext4_mb_initialize_context(struct ext4_allocation_context *ac,
+static noinline_for_stack int
+ext4_mb_initialize_context(struct ext4_allocation_context *ac,
struct ext4_allocation_request *ar)
{
struct super_block *sb = ar->inode->i_sb;
ext4_mb_free_committed_blocks(sb);
}
-static int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+static noinline_for_stack int
+ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
ext4_group_t group, ext4_grpblk_t block, int count)
{
struct ext4_group_info *db = e4b->bd_info;
}
spin_lock(sb_bgl_lock(sbi, block_group));
- gdp->bg_free_blocks_count =
- cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
+ le16_add_cpu(&gdp->bg_free_blocks_count, count);
gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
spin_unlock(sb_bgl_lock(sbi, block_group));
percpu_counter_add(&sbi->s_freeblocks_counter, count);