static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
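+       /* A ref count of zero means the glock is already being freed;
+        * taking a new reference here would be a use-after-free. */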
+       GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
        atomic_inc(&gl->gl_ref);
 }
 
                        atomic_dec(&lru_count);
                }
                spin_unlock(&lru_lock);
-               GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru));
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                glock_free(gl);
                rv = 1;
                goto out;
        }
-       write_unlock(gl_lock_addr(gl->gl_hash));
        /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
        if (atomic_read(&gl->gl_ref) == 2)
                gfs2_glock_schedule_for_reclaim(gl);
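+       /* The bucket lock is now held over the ref check, presumably so
+        * the glock cannot be freed before reclaim is scheduled. */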
+       write_unlock(gl_lock_addr(gl->gl_hash));
 out:
        return rv;
 }
 
        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 
+       down_read(&gfs2_umount_flush_sem);
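+       /* Held until out_sem so a umount flush cannot race with the
+        * demote/promote state machine below. */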
        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
-                       goto out;
+                       goto out_unlock;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                ret = do_promote(gl);
                if (ret == 0)
-                       goto out;
+                       goto out_unlock;
                if (ret == 2)
-                       return;
+                       goto out_sem;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
+out_sem:
+       up_read(&gfs2_umount_flush_sem);
        return;
 
 out_sched:
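+       /* The queued work item owns a glock reference; if the work was
+        * already pending, drop the reference we just took. */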
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
-out:
+out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
+       goto out_sem;
 }
 
 static void glock_work_func(struct work_struct *work)

 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
        struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
-       down_read(&gfs2_umount_flush_sem);
        gl->gl_reply = ret;
        if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
                struct gfs2_holder *gh;
                spin_lock(&gl->gl_spin);
                gh = find_first_waiter(gl);
                if ((gh && (gh->gh_flags & LM_FLAG_NOEXP)) ||
                    ((ret & ~LM_OUT_ST_MASK) != 0))
                        set_bit(GLF_FROZEN, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
-               if (test_bit(GLF_FROZEN, &gl->gl_flags)) {
-                       up_read(&gfs2_umount_flush_sem);
+               if (test_bit(GLF_FROZEN, &gl->gl_flags))
                        return;
-               }
        }
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
-       up_read(&gfs2_umount_flush_sem);
 }
 
 /**
 {
        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
                return;
-       down_read(&gfs2_umount_flush_sem);
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
-       up_read(&gfs2_umount_flush_sem);
 }
 
 /**
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        if (test_bit(GLF_INITIAL, gflags))
-               *p++ = 'i';
+               *p++ = 'I';
        if (test_bit(GLF_FROZEN, gflags))
                *p++ = 'F';
        *p = 0;
 
 static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
-       unsigned int blocks;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
-       int error;
+       struct gfs2_trans tr;
 
-       blocks = atomic_read(&gl->gl_ail_count);
-       if (!blocks)
-               return;
+       memset(&tr, 0, sizeof(tr));
+       tr.tr_revokes = atomic_read(&gl->gl_ail_count);
 
-       error = gfs2_trans_begin(sdp, 0, blocks);
-       if (gfs2_assert_withdraw(sdp, !error))
+       if (!tr.tr_revokes)
                return;
 
+       /* A shortened, inline version of gfs2_trans_begin() */
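+       /* The transaction lives on the stack and carries only revokes;
+        * presumably open-coded so the transaction glock is never taken,
+        * which could deadlock while the journal is being flushed. */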
+       tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
+       tr.tr_ip = (unsigned long)__builtin_return_address(0);
+       INIT_LIST_HEAD(&tr.tr_list_buf);
+       gfs2_log_reserve(sdp, tr.tr_reserved);
+       BUG_ON(current->journal_info);
+       current->journal_info = &tr;
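+       /* The matching gfs2_trans_end() finds this tr via journal_info;
+        * with tr_t_gh.gh_gl == NULL it skips the glock dq and kfree(). */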
+
        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata,
 
 
        if (!tr->tr_touched) {
                gfs2_log_release(sdp, tr->tr_reserved);
-               gfs2_glock_dq(&tr->tr_t_gh);
-               gfs2_holder_uninit(&tr->tr_t_gh);
-               kfree(tr);
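+       /* On-stack transactions from gfs2_ail_empty_gl() hold no
+        * transaction glock and must not be freed. */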
+               if (tr->tr_t_gh.gh_gl) {
+                       gfs2_glock_dq(&tr->tr_t_gh);
+                       gfs2_holder_uninit(&tr->tr_t_gh);
+                       kfree(tr);
+               }
                return;
        }
 
        }
 
        gfs2_log_commit(sdp, tr);
-        gfs2_glock_dq(&tr->tr_t_gh);
-        gfs2_holder_uninit(&tr->tr_t_gh);
-        kfree(tr);
+       if (tr->tr_t_gh.gh_gl) {
+               gfs2_glock_dq(&tr->tr_t_gh);
+               gfs2_holder_uninit(&tr->tr_t_gh);
+               kfree(tr);
+       }
 
        if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
                gfs2_log_flush(sdp, NULL);