--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ ... @@
 enum dlm_mle_type {
        DLM_MLE_BLOCK,
        DLM_MLE_MASTER,
-       DLM_MLE_MIGRATION
+       DLM_MLE_MIGRATION,
+       DLM_MLE_NUM_TYPES
 };
 
 struct dlm_lock_name {
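The new DLM_MLE_NUM_TYPES entry is a sizing sentinel: as the last enumerator it always equals the number of real mle types, so arrays indexed by type stay in sync with the enum automatically. A minimal sketch of the pattern (the label table below is hypothetical, not part of the patch):

/* Hypothetical per-type label table, sized by the sentinel. */
static const char *mle_type_names[DLM_MLE_NUM_TYPES] = {
        [DLM_MLE_BLOCK]     = "BLK",
        [DLM_MLE_MASTER]    = "MAS",
        [DLM_MLE_MIGRATION] = "MIG",
};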
@@ ... @@ struct dlm_ctxt
        struct list_head mle_hb_events;
 
        /* these give a really vague idea of the system load */
+       atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
+       atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
        atomic_t local_resources;
        atomic_t remote_resources;
        atomic_t unknown_resources;
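These two arrays sit in struct dlm_ctxt (dlmcommon.h) next to the existing load counters: mle_tot_count accumulates every mle ever created of a given type, while mle_cur_count tracks only the ones currently alive. A hedged sketch of how they might be reported; dlm_dump_mle_counts() is an assumed helper, not something this patch adds:

/* Assumed reporting helper; atomic_read() is the standard accessor. */
static void dlm_dump_mle_counts(struct dlm_ctxt *dlm)
{
        int i;

        for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
                printk(KERN_NOTICE "mle type %d: total %d, live %d\n", i,
                       atomic_read(&dlm->mle_tot_count[i]),
                       atomic_read(&dlm->mle_cur_count[i]));
}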
 
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ ... @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
        atomic_set(&dlm->remote_resources, 0);
        atomic_set(&dlm->unknown_resources, 0);
 
+       for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
+               atomic_set(&dlm->mle_tot_count[i], 0);
+               atomic_set(&dlm->mle_cur_count[i], 0);
+       }
+
        spin_lock_init(&dlm->work_lock);
        INIT_LIST_HEAD(&dlm->work_list);
        INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
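dlm_alloc_ctxt() zeroes both arrays before the domain goes live, so every later increment starts from a known baseline (the loop variable i is declared earlier in the function). Because mle_cur_count is only incremented at init and decremented at release, a clean teardown should find it back at zero; an illustrative leak check, with name and placement assumed:

/* Hypothetical teardown assertion: all counted mles were released. */
static void dlm_assert_no_mles(struct dlm_ctxt *dlm)
{
        int i;

        for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
                BUG_ON(atomic_read(&dlm->mle_cur_count[i]) != 0);
}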
 
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ ... @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
                mle->u.mlename.hash = dlm_lockid_hash(name, namelen);
        }
 
+       atomic_inc(&dlm->mle_tot_count[mle->type]);
+       atomic_inc(&dlm->mle_cur_count[mle->type]);
+
        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
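In dlm_init_mle() both counters are bumped for the mle's type. Since the release path drops only mle_cur_count, the difference between the two counters is the number of mles of that type that have come and gone; a small derived-stat sketch, helper name assumed:

/* Assumed helper: mles of @type created and since released. */
static inline int dlm_mle_released_count(struct dlm_ctxt *dlm,
                                         enum dlm_mle_type type)
{
        return atomic_read(&dlm->mle_tot_count[type]) -
               atomic_read(&dlm->mle_cur_count[type]);
}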
@@ ... @@ static void dlm_mle_release(struct kref *kref)
        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);
 
+       atomic_dec(&dlm->mle_cur_count[mle->type]);
+
        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
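dlm_mle_release() runs as the kref release callback, so the decrement fires exactly once per mle no matter how many references were held. A simplified sketch of the put side, assuming the usual kref pattern (the real __dlm_put_mle() also carries locking and refcount sanity checks):

static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
        /* On the last reference, kref_put() invokes dlm_mle_release(),
         * which does the atomic_dec() above before freeing the mle. */
        kref_put(&mle->mle_refs, dlm_mle_release);
}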