                 * ocfs2_lock_allocators(). It greatly over-estimates
                 * the work to be done.
                 */
-               ret = ocfs2_lock_allocators(inode, di, clusters_to_alloc,
-                                           extents_to_split, &data_ac, &meta_ac);
+               ret = ocfs2_lock_allocators(inode, wc->w_di_bh,
+                                           clusters_to_alloc, extents_to_split,
+                                           &data_ac, &meta_ac);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
 
        spin_lock(&OCFS2_I(dir)->ip_lock);
        if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
                spin_unlock(&OCFS2_I(dir)->ip_lock);
-               num_free_extents = ocfs2_num_free_extents(osb, dir, fe);
+               num_free_extents = ocfs2_num_free_extents(osb, dir,
+                                                         parent_fe_bh);
                if (num_free_extents < 0) {
                        status = num_free_extents;
                        mlog_errno(status);
 
        if (mark_unwritten)
                flags = OCFS2_EXT_UNWRITTEN;
 
-       free_extents = ocfs2_num_free_extents(osb, inode, fe);
+       free_extents = ocfs2_num_free_extents(osb, inode, fe_bh);
        if (free_extents < 0) {
                status = free_extents;
                mlog_errno(status);
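
The definition of ocfs2_num_free_extents() is not part of this excerpt, but the call sites above show that it now takes the dinode's buffer_head rather than the struct ocfs2_dinode itself. Assuming it mirrors the b_data cast that ocfs2_lock_allocators() gains below, the conversion would look roughly like this sketch (simplified to a depth-zero extent list; the real function also descends to the rightmost extent block when l_tree_depth is non-zero):

int ocfs2_num_free_extents(struct ocfs2_super *osb,
                           struct inode *inode,
                           struct buffer_head *fe_bh)
{
        /* Illustrative sketch, not an actual hunk from this patch:
         * derive the dinode from the buffer_head instead of
         * receiving it directly. */
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *)fe_bh->b_data;
        struct ocfs2_extent_list *el = &fe->id2.i_list;

        /* Free record slots left in the inline extent list. */
        return le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
}
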
  * File systems which don't support holes call this from
  * ocfs2_extend_allocation().
  */
-int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
+int ocfs2_lock_allocators(struct inode *inode, struct buffer_head *di_bh,
                          u32 clusters_to_add, u32 extents_to_split,
                          struct ocfs2_alloc_context **data_ac,
                          struct ocfs2_alloc_context **meta_ac)
        int ret = 0, num_free_extents;
        unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
        *meta_ac = NULL;
        if (data_ac)
             (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode),
             le32_to_cpu(di->i_clusters), clusters_to_add, extents_to_split);
 
-       num_free_extents = ocfs2_num_free_extents(osb, inode, di);
+       num_free_extents = ocfs2_num_free_extents(osb, inode, di_bh);
        if (num_free_extents < 0) {
                ret = num_free_extents;
                mlog_errno(ret);
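
Taken together with the call-site changes above, ocfs2_lock_allocators() now performs the (struct ocfs2_dinode *)di_bh->b_data conversion itself, so callers only pass the dinode's buffer_head. A minimal caller sketch of the new convention follows; ocfs2_alloc_example() is hypothetical and only illustrates the calling pattern, it is not part of the patch.

static int ocfs2_alloc_example(struct inode *inode, struct buffer_head *di_bh,
                               u32 clusters_to_add)
{
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        int ret;

        /* The dinode is derived from di_bh->b_data inside
         * ocfs2_lock_allocators(); the caller only hands over the
         * buffer_head. */
        ret = ocfs2_lock_allocators(inode, di_bh, clusters_to_add, 0,
                                    &data_ac, &meta_ac);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /* ... reserve and insert extents using data_ac and meta_ac ... */

out:
        if (data_ac)
                ocfs2_free_alloc_context(data_ac);
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);
        return ret;
}
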
 restart_all:
        BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 
-       status = ocfs2_lock_allocators(inode, fe, clusters_to_add, 0, &data_ac,
+       status = ocfs2_lock_allocators(inode, bh, clusters_to_add, 0, &data_ac,
                                       &meta_ac);
        if (status) {
                mlog_errno(status);
        struct ocfs2_alloc_context *meta_ac = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
-       ret = ocfs2_lock_allocators(inode, di, 0, 1, NULL, &meta_ac);
+       ret = ocfs2_lock_allocators(inode, di_bh, 0, 1, NULL, &meta_ac);
        if (ret) {
                mlog_errno(ret);
                return ret;
 
                               enum ocfs2_alloc_restarted *reason_ret);
 int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
                          u64 zero_to);
-int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
+int ocfs2_lock_allocators(struct inode *inode, struct buffer_head *di_bh,
                          u32 clusters_to_add, u32 extents_to_split,
                          struct ocfs2_alloc_context **data_ac,
                          struct ocfs2_alloc_context **meta_ac);