ocfs2: dump lockres info before we BUG() on a bad reference
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 19399446aa8e1424b9601122c722863bfb3050c1..427c0af0d219100c4bb5bbf270edf1ec6d66ecdd 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -371,9 +371,14 @@ static void __dlm_put_mle(struct dlm_master_list_entry *mle)
 
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
-       BUG_ON(!atomic_read(&mle->mle_refs.refcount));
-
-       kref_put(&mle->mle_refs, dlm_mle_release);
+       if (!atomic_read(&mle->mle_refs.refcount)) {
+               /* dumping a bad mle may itself crash, but we
+                * are about to BUG() anyway; log what we can. */
+               mlog(ML_ERROR, "bad mle: %p\n", mle);
+               dlm_print_one_mle(mle);
+               BUG();
+       } else
+               kref_put(&mle->mle_refs, dlm_mle_release);
 }
 
 
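This hunk is what the subject line describes: rather than letting BUG_ON() fire with no context, the code logs the suspect mle and prints its full state before calling BUG(), so the crash dump actually identifies the bad reference. A minimal userspace sketch of the same pattern, with invented names (struct ref_obj, bad_ref_dump, put_ref) standing in for the mle, dlm_print_one_mle() and __dlm_put_mle(), and abort() standing in for BUG():

#include <stdio.h>
#include <stdlib.h>

struct ref_obj {
	const char *name;
	int refcount;
};

/* Hypothetical stand-in for dlm_print_one_mle(): dump everything
 * we know about the object before the process dies. */
static void bad_ref_dump(const struct ref_obj *obj)
{
	fprintf(stderr, "bad ref: %p name=%s refcount=%d\n",
		(const void *)obj, obj->name, obj->refcount);
}

/* Like __dlm_put_mle() after this patch: a zero refcount is still
 * fatal, but we dump state first instead of dying silently. */
static void put_ref(struct ref_obj *obj)
{
	if (obj->refcount <= 0) {
		bad_ref_dump(obj);
		abort();	/* userspace stand-in for BUG() */
	}
	obj->refcount--;
}

int main(void)
{
	struct ref_obj obj = { .name = "example", .refcount = 1 };

	put_ref(&obj);	/* drops the last reference */
	put_ref(&obj);	/* refcount is now 0: dumps state, then aborts */
	return 0;
}
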
@@ -614,6 +619,28 @@ static void dlm_lockres_release(struct kref *kref)
        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
             res->lockname.name);
 
+       if (!hlist_unhashed(&res->hash_node) ||
+           !list_empty(&res->granted) ||
+           !list_empty(&res->converting) ||
+           !list_empty(&res->blocked) ||
+           !list_empty(&res->dirty) ||
+           !list_empty(&res->recovering) ||
+           !list_empty(&res->purge)) {
+               mlog(ML_ERROR,
+                    "Going to BUG for resource %.*s."
+                    "  We're on a list! [%c%c%c%c%c%c%c]\n",
+                    res->lockname.len, res->lockname.name,
+                    !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
+                    !list_empty(&res->granted) ? 'G' : ' ',
+                    !list_empty(&res->converting) ? 'C' : ' ',
+                    !list_empty(&res->blocked) ? 'B' : ' ',
+                    !list_empty(&res->dirty) ? 'D' : ' ',
+                    !list_empty(&res->recovering) ? 'R' : ' ',
+                    !list_empty(&res->purge) ? 'P' : ' ');
+
+               dlm_print_one_lock_resource(res);
+       }
+
        /* By the time we're ready to blow this guy away, we shouldn't
         * be on any lists. */
        BUG_ON(!hlist_unhashed(&res->hash_node));
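
The [%c%c%c%c%c%c%c] format above packs seven membership checks into one log line: each position prints the initial of the list the resource is still linked into (Hash, Granted, Converting, Blocked, Dirty, Recovering, Purge) or a space if it is clean. A self-contained sketch of the idiom, with a toy struct list_head and helpers standing in for the kernel's:

#include <stdio.h>
#include <stdbool.h>

/* Toy circular list head, modeled on the kernel's struct list_head:
 * a head that points at itself is empty. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static bool list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

int main(void)
{
	struct list_head granted, converting, blocked, entry;

	INIT_LIST_HEAD(&granted);
	INIT_LIST_HEAD(&converting);
	INIT_LIST_HEAD(&blocked);

	/* Pretend the resource is still queued on the converting list. */
	list_add(&entry, &converting);

	/* One flag character per condition: this prints "[ C ]". */
	printf("still on a list! [%c%c%c]\n",
	       !list_empty(&granted)    ? 'G' : ' ',
	       !list_empty(&converting) ? 'C' : ' ',
	       !list_empty(&blocked)    ? 'B' : ' ');
	return 0;
}
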
@@ -1008,6 +1035,12 @@ recheck:
                     "rechecking now\n", dlm->name, res->lockname.len,
                     res->lockname.name);
                goto recheck;
+       } else {
+               if (!voting_done) {
+                       mlog(0, "map not changed and voting not done "
+                            "for %s:%.*s\n", dlm->name, res->lockname.len,
+                            res->lockname.name);
+               }
        }
 
        if (m != O2NM_MAX_NODES) {
@@ -1512,15 +1545,12 @@ way_up_top:
                                mlog_errno(-ENOMEM);
                                goto send_response;
                        }
-                       spin_lock(&dlm->spinlock);
-                       dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL,
-                                        name, namelen);
-                       spin_unlock(&dlm->spinlock);
                        goto way_up_top;
                }
 
                // mlog(0, "this is second time thru, already allocated, "
                // "add the block.\n");
+               dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
                set_bit(request->node_idx, mle->maybe_map);
                list_add(&mle->list, &dlm->master_list);
                response = DLM_MASTER_RESP_NO;
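
The change above moves dlm_init_mle() out of the allocation path: the handler has to drop its locks to allocate, jumps back to way_up_top to redo the lookup, and only initializes and links the mle on the pass that actually inserts it. A compact sketch of that alloc-outside-lock/retry shape, using pthreads and an invented find_or_insert() on a toy table (none of these names are the kernel's):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	char name[32];
	struct entry *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;	/* toy single-bucket "hash table" */

static struct entry *lookup(const char *name)
{
	for (struct entry *e = table; e; e = e->next)
		if (!strcmp(e->name, name))
			return e;
	return NULL;
}

/* Find-or-insert: allocate without the lock held, then re-check,
 * because another thread may have inserted while we allocated. */
static struct entry *find_or_insert(const char *name)
{
	struct entry *new = NULL, *e;

retry:
	pthread_mutex_lock(&table_lock);
	e = lookup(name);
	if (e) {
		pthread_mutex_unlock(&table_lock);
		free(new);	/* lost the race; discard our copy */
		return e;
	}
	if (!new) {
		/* drop the lock to allocate, then redo the lookup */
		pthread_mutex_unlock(&table_lock);
		new = calloc(1, sizeof(*new));
		if (!new)
			return NULL;
		goto retry;
	}
	/* second time through: initialize and link only when we know
	 * this allocation is the one going into the table */
	strncpy(new->name, name, sizeof(new->name) - 1);
	new->next = table;
	table = new;
	pthread_mutex_unlock(&table_lock);
	return new;
}
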
@@ -1694,7 +1724,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
                if (bit >= O2NM_MAX_NODES) {
                        /* not necessarily an error, though less likely.
                         * could be master just re-asserting. */
-                       mlog(ML_ERROR, "no bits set in the maybe_map, but %u "
+                       mlog(0, "no bits set in the maybe_map, but %u "
                             "is asserting! (%.*s)\n", assert->node_idx,
                             namelen, name);
                } else if (bit != assert->node_idx) {
@@ -1706,13 +1736,30 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
                                 * number winning the mastery will respond
                                 * YES to mastery requests, but this node
                                 * had no way of knowing.  let it pass. */
-                               mlog(ML_ERROR, "%u is the lowest node, "
+                               mlog(0, "%u is the lowest node, "
                                     "%u is asserting. (%.*s)  %u must "
                                     "have begun after %u won.\n", bit,
                                     assert->node_idx, namelen, name, bit,
                                     assert->node_idx);
                        }
                }
+               if (mle->type == DLM_MLE_MIGRATION) {
+                       if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
+                               mlog(0, "%s:%.*s: got cleanup assert"
+                                    " from %u for migration\n",
+                                    dlm->name, namelen, name,
+                                    assert->node_idx);
+                       } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
+                               mlog(0, "%s:%.*s: got unrelated assert"
+                                    " from %u for migration, ignoring\n",
+                                    dlm->name, namelen, name,
+                                    assert->node_idx);
+                               __dlm_put_mle(mle);
+                               spin_unlock(&dlm->master_lock);
+                               spin_unlock(&dlm->spinlock);
+                               goto done;
+                       }
+               }
        }
        spin_unlock(&dlm->master_lock);
 
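Note the early exit added above: the handler holds dlm->spinlock and then dlm->master_lock, so the ignore path drops its mle reference and releases the two locks in reverse acquisition order before the goto done. A small pthread sketch of that discipline (names illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays dlm->spinlock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* plays dlm->master_lock */

/* Early exit mirrors the "goto done" path: unlock in the reverse
 * of the order the locks were taken, so every path preserves the
 * same lock hierarchy as the normal path. */
static int handle(int unrelated)
{
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);

	if (unrelated) {
		pthread_mutex_unlock(&inner);
		pthread_mutex_unlock(&outer);
		return 0;	/* ignored, like the unrelated assert */
	}

	/* ... normal processing under both locks ... */

	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
	return 1;
}

int main(void)
{
	printf("unrelated=%d handled=%d\n", handle(1), handle(0));
	return 0;
}
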
@@ -1727,7 +1774,8 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
                        goto kill;
                }
                if (!mle) {
-                       if (res->owner != assert->node_idx) {
+                       if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
+                           res->owner != assert->node_idx) {
                                mlog(ML_ERROR, "assert_master from "
                                          "%u, but current owner is "
                                          "%u! (%.*s)\n",
@@ -2253,8 +2301,8 @@ fail:
                        /* avoid hang during shutdown when migrating lockres 
                         * to a node which also goes down */
                        if (dlm_is_node_dead(dlm, target)) {
-                               mlog(0, "%s:%.*s: expected migration target %u "
-                                    "is no longer up.  restarting.\n",
+                               mlog(0, "%s:%.*s: expected migration "
+                                    "target %u is no longer up, restarting\n",
                                     dlm->name, res->lockname.len,
                                     res->lockname.name, target);
                                ret = -ERESTARTSYS;
@@ -2673,6 +2721,7 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                        /* remove it from the list so that only one
                         * mle will be found */
                        list_del_init(&tmp->list);
+                       __dlm_mle_detach_hb_events(dlm, tmp);
                }
                spin_unlock(&tmp->spinlock);
        }
@@ -2767,14 +2816,15 @@ top:
 
                /* remove from the list early.  NOTE: unlinking
                 * list_head while in list_for_each_safe */
+               __dlm_mle_detach_hb_events(dlm, mle);
                spin_lock(&mle->spinlock);
                list_del_init(&mle->list);
                atomic_set(&mle->woken, 1);
                spin_unlock(&mle->spinlock);
                wake_up(&mle->wq);
 
-               mlog(0, "node %u died during migration from "
-                    "%u to %u!\n", dead_node,
+               mlog(0, "%s: node %u died during migration from "
+                    "%u to %u!\n", dlm->name, dead_node,
                     mle->master, mle->new_master);
                /* if there is a lockres associated with this
                 * mle, find it and set its owner to UNKNOWN */
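
The NOTE in this last hunk, "unlinking list_head while in list_for_each_safe", is the standard idiom for deleting entries mid-walk: the _safe iterator caches the next pointer before the loop body runs, so the body is free to unlink and free the current entry. A self-contained sketch of the same idea on a plain singly linked list (the kernel's version works on circular doubly linked lists):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	/* Build a small list: 3 -> 2 -> 1. */
	struct node *head = NULL;
	for (int i = 1; i <= 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/* Safe traversal in the spirit of list_for_each_safe():
	 * save the next pointer before the body runs, so the body
	 * may unlink and free the current entry without losing
	 * its place in the walk. */
	struct node **link = &head;
	struct node *cur, *next;
	for (cur = head; cur; cur = next) {
		next = cur->next;	/* cached up front */
		if (cur->val == 2) {
			*link = next;	/* unlink, then free */
			free(cur);
			continue;
		}
		link = &cur->next;
	}

	for (cur = head; cur; cur = cur->next)
		printf("%d\n", cur->val);	/* prints 3, then 1 */

	while (head) {			/* cleanup */
		next = head->next;
		free(head);
		head = next;
	}
	return 0;
}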