ocfs2/dlm: dlm_set_lockres_owner() and dlm_change_lockres_owner() inlined
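
This blobdiff spans the ocfs2/dlm master list rework: master list entries (mles) move off the single dlm->master_list onto a hash table (dlm->master_hash) keyed by dlm_lockid_hash(), per-type mle counters and per-domain lockres counters are added for accounting, and dlm_set_lockres_owner()/dlm_change_lockres_owner() are removed from dlmmaster.c in favor of inline versions kept elsewhere (the header side of that move is not part of this diff).
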
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ec6da3c37dc825af2fcf268c730e2d18f3e24503..d9c5c5a340078217d43c28da72c1c41a292531f6 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -69,7 +69,8 @@ static int dlm_do_assert_master(struct dlm_ctxt *dlm,
 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
 
 static inline void __dlm_mle_name(struct dlm_master_list_entry *mle,
-                                 unsigned char **name, unsigned int *namelen)
+                                 unsigned char **name, unsigned int *namelen,
+                                 unsigned int *namehash)
 {
        BUG_ON(mle->type != DLM_MLE_BLOCK &&
               mle->type != DLM_MLE_MASTER &&
@@ -78,9 +79,13 @@ static inline void __dlm_mle_name(struct dlm_master_list_entry *mle,
        if (mle->type != DLM_MLE_MASTER) {
                *name = mle->u.mlename.name;
                *namelen = mle->u.mlename.len;
+               if (namehash)
+                       *namehash = mle->u.mlename.hash;
        } else {
                *name  = (unsigned char *)mle->u.mleres->lockname.name;
                *namelen = mle->u.mleres->lockname.len;
+               if (namehash)
+                       *namehash = mle->u.mleres->lockname.hash;
        }
 }
 
@@ -95,7 +100,7 @@ static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
        if (dlm != mle->dlm)
                return 0;
 
-       __dlm_mle_name(mle, &mlename, &mlelen);
+       __dlm_mle_name(mle, &mlename, &mlelen, NULL);
 
        if (namelen != mlelen || memcmp(name, mlename, namelen) != 0)
                return 0;
@@ -294,7 +299,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
 
        mle->dlm = dlm;
        mle->type = type;
-       INIT_LIST_HEAD(&mle->list);
+       INIT_HLIST_NODE(&mle->master_hash_node);
        INIT_LIST_HEAD(&mle->hb_events);
        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
        spin_lock_init(&mle->spinlock);
@@ -317,8 +322,12 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
                BUG_ON(!name);
                memcpy(mle->u.mlename.name, name, namelen);
                mle->u.mlename.len = namelen;
+               mle->u.mlename.hash = dlm_lockid_hash(name, namelen);
        }
 
+       atomic_inc(&dlm->mle_tot_count[mle->type]);
+       atomic_inc(&dlm->mle_cur_count[mle->type]);
+
        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
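
Only BLOCK and MIGRATION mles need the name hash cached at init time; for a MASTER mle, __dlm_mle_name() returns the hash already computed for the attached lockres (mle->u.mleres->lockname.hash), so dlm_init_mle() has nothing extra to store in that case.
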
@@ -334,15 +343,21 @@ void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
 
-       if (!list_empty(&mle->list))
-               list_del_init(&mle->list);
+       if (!hlist_unhashed(&mle->master_hash_node))
+               hlist_del_init(&mle->master_hash_node);
 }
 
 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 {
+       struct hlist_head *bucket;
+       unsigned char *mname;
+       unsigned int mlen, hash;
+
        assert_spin_locked(&dlm->master_lock);
 
-       list_add(&mle->list, &dlm->master_list);
+       __dlm_mle_name(mle, &mname, &mlen, &hash);
+       bucket = dlm_master_hash(dlm, hash);
+       hlist_add_head(&mle->master_hash_node, bucket);
 }
 
 /* returns 1 if found, 0 if not */
@@ -351,10 +366,17 @@ static int dlm_find_mle(struct dlm_ctxt *dlm,
                        char *name, unsigned int namelen)
 {
        struct dlm_master_list_entry *tmpmle;
+       struct hlist_head *bucket;
+       struct hlist_node *list;
+       unsigned int hash;
 
        assert_spin_locked(&dlm->master_lock);
 
-       list_for_each_entry(tmpmle, &dlm->master_list, list) {
+       hash = dlm_lockid_hash(name, namelen);
+       bucket = dlm_master_hash(dlm, hash);
+       hlist_for_each(list, bucket) {
+               tmpmle = hlist_entry(list, struct dlm_master_list_entry,
+                                    master_hash_node);
                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                        continue;
                dlm_get_mle(tmpmle);
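
Both __dlm_insert_mle() and dlm_find_mle() funnel through dlm_master_hash(), whose definition is outside this file. A minimal sketch of what such a bucket lookup amounts to, assuming dlm->master_hash is a flat array of DLM_HASH_BUCKETS hlist heads (the real layout may differ, e.g. buckets spread across pages like the lockres hash):

	static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
							 unsigned int hash)
	{
		/* works both for a full dlm_lockid_hash() value and for a
		 * bucket index, as used by dlm_clean_master_list() below */
		return &dlm->master_hash[hash % DLM_HASH_BUCKETS];
	}

With that, insertion computes the name hash once via __dlm_mle_name() and does a plain hlist_add_head() into the bucket, while lookup recomputes dlm_lockid_hash() for the search key and walks a single bucket instead of the whole former master_list.
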
@@ -428,29 +450,28 @@ static void dlm_mle_release(struct kref *kref)
 {
        struct dlm_master_list_entry *mle;
        struct dlm_ctxt *dlm;
+       unsigned char *mname;
+       unsigned int mlen;
 
        mlog_entry_void();
 
        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
        dlm = mle->dlm;
 
-       if (mle->type != DLM_MLE_MASTER) {
-               mlog(0, "calling mle_release for %.*s, type %d\n",
-                    mle->u.mlename.len, mle->u.mlename.name, mle->type);
-       } else {
-               mlog(0, "calling mle_release for %.*s, type %d\n",
-                    mle->u.mleres->lockname.len,
-                    mle->u.mleres->lockname.name, mle->type);
-       }
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
 
+       __dlm_mle_name(mle, &mname, &mlen, NULL);
+       mlog(0, "Releasing mle for %.*s, type %d\n", mlen, mname, mle->type);
+
        /* remove from list if not already */
        __dlm_unlink_mle(dlm, mle);
 
        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);
 
+       atomic_dec(&dlm->mle_cur_count[mle->type]);
+
        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
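
The mle_tot_count[]/mle_cur_count[] arrays are indexed by mle type and follow the usual tot/cur accounting pattern: the total is bumped at creation and never decremented, while the current count is dropped at release, so tot records how many mles of each type were ever created and cur how many are live right now. The res_tot_count/res_cur_count pair added further down applies the same scheme to lock resources. A minimal sketch of the pattern, with hypothetical names:

	/* hypothetical illustration of the tot/cur accounting pattern */
	static atomic_t tot_count = ATOMIC_INIT(0);	/* ever created: inc only */
	static atomic_t cur_count = ATOMIC_INIT(0);	/* currently live */

	static void obj_init_stats(void)
	{
		atomic_inc(&tot_count);
		atomic_inc(&cur_count);
	}

	static void obj_release_stats(void)
	{
		atomic_dec(&cur_count);
	}
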
@@ -490,43 +511,6 @@ void dlm_destroy_master_caches(void)
                kmem_cache_destroy(dlm_lockres_cache);
 }
 
-static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
-                                 struct dlm_lock_resource *res,
-                                 u8 owner)
-{
-       assert_spin_locked(&res->spinlock);
-
-       mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
-
-       if (owner == dlm->node_num)
-               atomic_inc(&dlm->local_resources);
-       else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
-               atomic_inc(&dlm->unknown_resources);
-       else
-               atomic_inc(&dlm->remote_resources);
-
-       res->owner = owner;
-}
-
-void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
-                             struct dlm_lock_resource *res, u8 owner)
-{
-       assert_spin_locked(&res->spinlock);
-
-       if (owner == res->owner)
-               return;
-
-       if (res->owner == dlm->node_num)
-               atomic_dec(&dlm->local_resources);
-       else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
-               atomic_dec(&dlm->unknown_resources);
-       else
-               atomic_dec(&dlm->remote_resources);
-
-       dlm_set_lockres_owner(dlm, res, owner);
-}
-
-
 static void dlm_lockres_release(struct kref *kref)
 {
        struct dlm_lock_resource *res;
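
Per the subject line, these two helpers were inlined rather than deleted outright: dlm_set_lockres_owner() is still called from dlm_init_lockres() below, so inline definitions must now live in a shared header (presumably dlmcommon.h; that side of the move is not shown in this blobdiff). Note that the old local/unknown/remote owner counters disappear with them, superseded by the res_tot_count/res_cur_count pair this patch introduces. A sketch of what the inline versions would reduce to once that counter bookkeeping is gone:

	/* sketch of the relocated inline helpers; destination header assumed */
	static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
						 struct dlm_lock_resource *res,
						 u8 owner)
	{
		assert_spin_locked(&res->spinlock);

		res->owner = owner;
	}

	static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
						    struct dlm_lock_resource *res,
						    u8 owner)
	{
		assert_spin_locked(&res->spinlock);

		if (owner != res->owner)
			dlm_set_lockres_owner(dlm, res, owner);
	}
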
@@ -552,6 +536,8 @@ static void dlm_lockres_release(struct kref *kref)
        }
        spin_unlock(&dlm->track_lock);
 
+       atomic_dec(&dlm->res_cur_count);
+
        dlm_put(dlm);
 
        if (!hlist_unhashed(&res->hash_node) ||
@@ -632,6 +618,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
        kref_init(&res->refs);
 
+       atomic_inc(&dlm->res_tot_count);
+       atomic_inc(&dlm->res_cur_count);
+
        /* just for consistency */
        spin_lock(&res->spinlock);
        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
@@ -1342,7 +1331,7 @@ static int dlm_do_master_request(struct dlm_lock_resource *res,
 
        BUG_ON(mle->type == DLM_MLE_MIGRATION);
 
-       __dlm_mle_name(mle, &mlename, &mlenamelen);
+       __dlm_mle_name(mle, &mlename, &mlenamelen, NULL);
 
        request.namelen = (u8)mlenamelen;
        memcpy(request.name, mlename, request.namelen);
@@ -3286,8 +3275,11 @@ static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
 
 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
 {
-       struct dlm_master_list_entry *mle, *next;
+       struct dlm_master_list_entry *mle;
        struct dlm_lock_resource *res;
+       struct hlist_head *bucket;
+       struct hlist_node *list, *tmp;
+       unsigned int i;
 
        mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
 top:
@@ -3295,66 +3287,70 @@ top:
 
        /* clean the master list */
        spin_lock(&dlm->master_lock);
-       list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
-               BUG_ON(mle->type != DLM_MLE_BLOCK &&
-                      mle->type != DLM_MLE_MASTER &&
-                      mle->type != DLM_MLE_MIGRATION);
-
-               /* MASTER mles are initiated locally.  the waiting
-                * process will notice the node map change
-                * shortly.  let that happen as normal. */
-               if (mle->type == DLM_MLE_MASTER)
-                       continue;
-
+       for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+               bucket = dlm_master_hash(dlm, i);
+               hlist_for_each_safe(list, tmp, bucket) {
+                       mle = hlist_entry(list, struct dlm_master_list_entry,
+                                         master_hash_node);
+
+                       BUG_ON(mle->type != DLM_MLE_BLOCK &&
+                              mle->type != DLM_MLE_MASTER &&
+                              mle->type != DLM_MLE_MIGRATION);
+
+                       /* MASTER mles are initiated locally. The waiting
+                        * process will notice the node map change shortly.
+                        * Let that happen as normal. */
+                       if (mle->type == DLM_MLE_MASTER)
+                               continue;
+
+                       /* BLOCK mles are initiated by other nodes. Need to
+                        * clean up if the dead node would have been the
+                        * master. */
+                       if (mle->type == DLM_MLE_BLOCK) {
+                               dlm_clean_block_mle(dlm, mle, dead_node);
+                               continue;
+                       }
 
-               /* BLOCK mles are initiated by other nodes.
-                * need to clean up if the dead node would have
-                * been the master. */
-               if (mle->type == DLM_MLE_BLOCK) {
-                       dlm_clean_block_mle(dlm, mle, dead_node);
-                       continue;
+                       /* Everything else is a MIGRATION mle */
+
+                       /* The rule for MIGRATION mles is that the master
+                        * becomes UNKNOWN if *either* the original or the new
+                        * master dies. All UNKNOWN lockres' are sent to
+                        * whichever node becomes the recovery master. The new
+                        * master is responsible for determining if there is
+                        * still a master for this lockres, or if he needs to
+                        * take over mastery. Either way, this node should
+                        * expect another message to resolve this. */
+
+                       if (mle->master != dead_node &&
+                           mle->new_master != dead_node)
+                               continue;
+
+                       /* If we have reached this point, this mle needs to be
+                        * removed from the list and freed. */
+                       dlm_clean_migration_mle(dlm, mle);
+
+                       mlog(0, "%s: node %u died during migration from "
+                            "%u to %u!\n", dlm->name, dead_node, mle->master,
+                            mle->new_master);
+
+                       /* If we find a lockres associated with the mle, we've
+                        * hit this rare case that messes up our lock ordering.
+                        * If so, we need to drop the master lock so that we can
+                        * take the lockres lock, meaning that we will have to
+                        * restart the scan from the top. */
+                       res = dlm_reset_mleres_owner(dlm, mle);
+                       if (res)
+                               /* restart */
+                               goto top;
+
+                       /* This may be the last reference */
+                       __dlm_put_mle(mle);
                }
-
-               /* everything else is a MIGRATION mle */
-
-               /* the rule for MIGRATION mles is that the master
-                * becomes UNKNOWN if *either* the original or
-                * the new master dies.  all UNKNOWN lockreses
-                * are sent to whichever node becomes the recovery
-                * master.  the new master is responsible for
-                * determining if there is still a master for
-                * this lockres, or if he needs to take over
-                * mastery.  either way, this node should expect
-                * another message to resolve this. */
-               if (mle->master != dead_node &&
-                   mle->new_master != dead_node)
-                       continue;
-
-               /* if we have reached this point, this mle needs to
-                * be removed from the list and freed. */
-               dlm_clean_migration_mle(dlm, mle);
-
-               mlog(0, "%s: node %u died during migration from "
-                    "%u to %u!\n", dlm->name, dead_node,
-                    mle->master, mle->new_master);
-
-               /* If we find a lockres associated with the mle, we've
-                * hit this rare case that messes up our lock ordering.
-                * If so, we need to drop the master lock so that we can
-                * take the lockres lock, meaning that we will have to
-                * restart from the head of list. */
-               res = dlm_reset_mleres_owner(dlm, mle);
-               if (res)
-                       /* restart */
-                       goto top;
-
-               /* this may be the last reference */
-               __dlm_put_mle(mle);
        }
        spin_unlock(&dlm->master_lock);
 }
 
-
 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                         u8 old_master)
 {
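
The bucket walk in dlm_clean_master_list() must use the _safe hlist iterator: __dlm_put_mle() can drop the last reference, at which point dlm_mle_release() frees the entry under the very same master_lock, so the loop may not touch mle->master_hash_node after the put. hlist_for_each_safe() caches the next pointer before the body runs. The pattern in isolation:

	struct hlist_node *list, *tmp;

	hlist_for_each_safe(list, tmp, bucket) {
		mle = hlist_entry(list, struct dlm_master_list_entry,
				  master_hash_node);
		/* ... body may end with __dlm_put_mle(mle), freeing mle;
		 * tmp was sampled before the put and stays valid */
	}
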