/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
struct dlm_lock_name
{
	u8 len;
	u8 name[DLM_LOCKID_NAME_MAX];
};
struct dlm_master_list_entry
{
	struct list_head list;
	struct list_head hb_events;
	struct dlm_ctxt *dlm;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;
	int inuse;
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;
	u8 new_master;
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	union {
		struct dlm_lock_resource *res;
		struct dlm_lock_name name;
	} u;
};
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
				unsigned int namelen, void *nodemap,
				u32 flags);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	struct dlm_lock_resource *res;

	if (mle->type == DLM_MLE_BLOCK ||
	    mle->type == DLM_MLE_MIGRATION) {
		if (namelen != mle->u.name.len ||
		    memcmp(name, mle->u.name.name, namelen) != 0)
			return 0;
	} else {
		res = mle->u.res;
		if (namelen != res->lockname.len ||
		    memcmp(res->lockname.name, name, namelen) != 0)
			return 0;
	}
	return 1;
}
/* Code here is included but defined out as it aids debugging */

#define dlm_print_nodemap(m)  _dlm_print_nodemap(m,#m)
void _dlm_print_nodemap(unsigned long *map, const char *mapname)
{
	int i;
	printk("%s=[ ", mapname);
	for (i=0; i<O2NM_MAX_NODES; i++)
		if (test_bit(i, map))
			printk("%d ", i);
	printk("]");
}
void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
	int refs;
	char *type, attached;
	u8 master;
	unsigned int namelen;
	const char *name;
	struct kref *k;
	unsigned long *maybe = mle->maybe_map,
		      *vote = mle->vote_map,
		      *resp = mle->response_map,
		      *node = mle->node_map;

	k = &mle->mle_refs;
	if (mle->type == DLM_MLE_BLOCK)
		type = "BLK";
	else if (mle->type == DLM_MLE_MASTER)
		type = "MAS";
	else
		type = "MIG";
	refs = atomic_read(&k->refcount);
	master = mle->master;
	attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');

	if (mle->type != DLM_MLE_MASTER) {
		namelen = mle->u.name.len;
		name = mle->u.name.name;
	} else {
		namelen = mle->u.res->lockname.len;
		name = mle->u.res->lockname.name;
	}

	mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
	     namelen, name, type, refs, master, mle->new_master, attached,
	     mle->inuse);
	dlm_print_nodemap(maybe);
	printk(", ");
	dlm_print_nodemap(vote);
	printk(", ");
	dlm_print_nodemap(resp);
	printk(", ");
	dlm_print_nodemap(node);
	printk("\n");
}
static void dlm_dump_mles(struct dlm_ctxt *dlm)
{
	struct dlm_master_list_entry *mle;
	struct list_head *iter;

	mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
	spin_lock(&dlm->master_lock);
	list_for_each(iter, &dlm->master_list) {
		mle = list_entry(iter, struct dlm_master_list_entry, list);
		dlm_print_one_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}
int dlm_dump_all_mles(const char __user *data, unsigned int len)
{
	struct list_head *iter;
	struct dlm_ctxt *dlm;

	spin_lock(&dlm_domain_lock);
	list_for_each(iter, &dlm_domains) {
		dlm = list_entry (iter, struct dlm_ctxt, list);
		mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
		dlm_dump_mles(dlm);
	}
	spin_unlock(&dlm_domain_lock);
	return len;
}
EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
static kmem_cache_t *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);
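
/* dlm_is_host_down() classifies an errno from the o2net layer:
 * nonzero means the error implies the remote node is unreachable
 * (callers such as dlm_do_master_request() treat that as node
 * death), zero means a local or programming error, which callers
 * handle with BUG(). */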
int dlm_is_host_down(int errno)
{
	switch (errno) {
		case -EINVAL: /* if returned from our tcp code,
				 this means there is no socket */
			return 1;
	}
	return 0;
}
/*
 * MASTER LIST FUNCTIONS
 */

/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
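
/* a sketch of the intended lifecycle, pieced together from the
 * helpers below and their call sites in this file:
 *
 *	spin_lock(&dlm->spinlock);
 *	dlm_init_mle(mle, ...);		(attaches hb events internally)
 *	spin_unlock(&dlm->spinlock);
 *	...mastery runs, an "answer" arrives...
 *	dlm_mle_detach_hb_events(dlm, mle);
 *	dlm_put_mle(mle);
 */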
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}

static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}

static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}
/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}
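
/* note the lock ordering used by dlm_put_mle() and everywhere else
 * in this file: dlm->spinlock is always taken before
 * dlm->master_lock, and both must be held before calling
 * __dlm_put_mle(). */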
static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_LIST_HEAD(&mle->list);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->u.res = res;
	} else if (mle->type == DLM_MLE_BLOCK) {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	} else /* DLM_MLE_MIGRATION */ {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	}

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct list_head *iter;

	assert_spin_locked(&dlm->master_lock);

	list_for_each(iter, &dlm->master_list) {
		tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;
	struct list_head *iter;

	assert_spin_locked(&dlm->spinlock);

	list_for_each(iter, &dlm->mle_hb_events) {
		mle = list_entry(iter, struct dlm_master_list_entry,
				 hb_events);
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL, NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	kmem_cache_destroy(dlm_mle_cache);
}
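
/* illustrative setup/teardown sequence for the mle cache (the real
 * call sites live outside this file):
 *
 *	if (dlm_init_mle_cache())
 *		return -ENOMEM;
 *	...use the dlm...
 *	dlm_destroy_mle_cache();
 */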
static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	if (mle->type != DLM_MLE_MASTER) {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.name.len, mle->u.name.name, mle->type);
	} else {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.res->lockname.len,
		     mle->u.res->lockname.name, mle->type);
	}
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* remove from list if not already */
	if (!list_empty(&mle->list))
		list_del_init(&mle->list);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 owner)
{
	assert_spin_locked(&res->spinlock);

	mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);

	if (owner == dlm->node_num)
		atomic_inc(&dlm->local_resources);
	else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_inc(&dlm->unknown_resources);
	else
		atomic_inc(&dlm->remote_resources);

	res->owner = owner;
}

void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res, u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner == res->owner)
		return;

	if (res->owner == dlm->node_num)
		atomic_dec(&dlm->local_resources);
	else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_dec(&dlm->unknown_resources);
	else
		atomic_dec(&dlm->remote_resources);

	dlm_set_lockres_owner(dlm, res, owner);
}
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;

	res = container_of(kref, struct dlm_lock_resource, refs);

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     "  We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kfree(res->lockname.name);

	kfree(res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;

	kref_init(&res->refs);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	memset(res->lvb, 0, DLM_LVB_LEN);
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res;

	res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
	if (!res)
		return NULL;
	res->lockname.name = kmalloc(namelen, GFP_KERNEL);
	if (!res->lockname.name) {
		kfree(res);
		return NULL;
	}
	dlm_init_lockres(dlm, res, name, namelen);
	return res;
}
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 */
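
/* illustrative caller pattern (the real callers are the locking
 * paths outside this file; the locals here are hypothetical):
 *
 *	res = dlm_get_lock_resource(dlm, lockid, flags);
 *	if (!res)
 *		...treat as an allocation failure...
 *	...res->owner is now known; queue the lock on res...
 */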
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int flags)
{
	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int namelen, hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;
	namelen = strlen(lockid);
	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash);
	if (tmpres) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "found in hash!\n");
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = (struct dlm_master_list_entry *)
			kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);
	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE! return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}
	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		} else if (mle->type == DLM_MLE_MIGRATION) {
			/* migration is in progress! */
			/* the good news is that we now know the
			 * "current" master (mle->master). */

			spin_unlock(&dlm->master_lock);
			assert_spin_locked(&dlm->spinlock);

			/* set the lockres owner and hash it */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res, mle->master);
			__dlm_insert_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			goto wake_waiters;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);
	}
	/* still holding the dlm spinlock, check the recovery map
	 * to see if there are any nodes that still need to be
	 * considered.  these will not appear in the mle nodemap
	 * but they might own this lockres.  wait on them. */
	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES) {
		mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
		     "recover before lock mastery can begin\n",
		     dlm->name, namelen, (char *)lockid, bit);
		wait_on_recovery = 1;
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);
	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
			     "must master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);
	}
	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

redo_request:
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s:%.*s: requests only up to %u but master "
			     "is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}
wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		mlog(0, "%s:%.*s: node map changed, redo the "
		     "master request now, blocked=%d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s:%.*s: spinning on "
			     "dlm_wait_for_lock_mastery, blocked=%d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			/* dlm_print_one_mle(mle); */
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "lockres mastered by %u\n", res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}
#define DLM_MASTERY_TIMEOUT_MS   5000
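
/* the node bitmaps on the mle drive the voting protocol:
 *
 *   vote_map     - nodes whose votes are needed (the domain_map,
 *                  minus this node, at mle init time)
 *   response_map - nodes that have responded to our master request
 *   maybe_map    - nodes that answered MAYBE, plus this node if it
 *                  is trying to master; the lowest set bit wins
 *
 * voting is done once response_map covers vote_map; if node_map no
 * longer matches vote_map, a node died or joined and mastery must
 * be restarted (see dlm_restart_lock_mastery below). */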
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		ret = dlm_do_master_request(mle, res->owner);
		if (ret < 0) {
			/* give recovery a chance to run */
			mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
			msleep(500);
			goto recheck;
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);
	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto recheck;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	}

	if (!map_changed && !voting_done) {
		mlog(0, "map not changed and voting not done "
		     "for %s:%.*s\n", dlm->name, res->lockname.len,
		     res->lockname.name);
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);
	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			     atomic_read(&mle->mle_refs.refcount),
			     res->lockname.len, res->lockname.name);

		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "waiting again\n");
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
	}
1096 mlog(0, "about to master %.*s here, this=%u\n",
1097 res->lockname.len, res->lockname.name, m);
1098 ret = dlm_do_assert_master(dlm, res->lockname.name,
1099 res->lockname.len, mle->vote_map, 0);
1101 /* This is a failure in the network path,
1102 * not in the response to the assert_master
1103 * (any nonzero response is a BUG on this node).
1104 * Most likely a socket just got disconnected
1105 * due to node death. */
1108 /* no longer need to restart lock mastery.
1109 * all living nodes have been contacted. */
1113 /* set the lockres owner */
1114 spin_lock(&res->spinlock);
1115 dlm_change_lockres_owner(dlm, res, m);
1116 spin_unlock(&res->spinlock);
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		/* XOR: keep only the bits that changed */
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;
	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}
	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;
	iter->curnode = bit;
	return bit;
}
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);

			/* if the node wasn't involved in mastery skip it,
			 * but clear it out from the maps so that it will
			 * not affect mastery of this lockres */
			clear_bit(node, mle->response_map);
			clear_bit(node, mle->vote_map);
			if (!test_bit(node, mle->maybe_map))
				goto next;

			/* if we're already blocked on lock mastery, and the
			 * dead node wasn't the expected master, or there is
			 * another node in the maybe_map, keep waiting */
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
							   O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node != lowest)
					goto next;

				mlog(ML_ERROR, "expected master %u died while "
				     "this node was blocked waiting on it!\n",
				     node);
				lowest = find_next_bit(mle->maybe_map,
						       O2NM_MAX_NODES,
						       lowest+1);
				if (lowest < O2NM_MAX_NODES) {
					mlog(0, "still blocked. waiting "
					     "on %u now\n", lowest);
					goto next;
				}

				/* mle is an MLE_BLOCK, but there is now
				 * nothing left to block on.  we need to return
				 * all the way back out and try again with
				 * an MLE_MASTER. dlm_do_local_recovery_cleanup
				 * has already run, so the mle refcount is ok */
				mlog(0, "no longer blocking. we can "
				     "try to master this here\n");
				mle->type = DLM_MLE_MASTER;
				memset(mle->maybe_map, 0,
				       sizeof(mle->maybe_map));
				memset(mle->response_map, 0,
				       sizeof(mle->response_map));
				memcpy(mle->vote_map, mle->node_map,
				       sizeof(mle->node_map));
				mle->u.res = res;
				set_bit(dlm->node_num, mle->maybe_map);

				ret = -EAGAIN;
				goto next;
			}
			clear_bit(node, mle->maybe_map);
			if (node > dlm->node_num)
				goto next;

			mlog(0, "dead node in map!\n");
			/* yuck. go back and re-contact all nodes
			 * in the vote_map, removing this node. */
			memset(mle->response_map, 0,
			       sizeof(mle->response_map));

			ret = -EAGAIN;
		}
next:
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 */
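
/* master request responses, as handled in the switch below:
 *   DLM_MASTER_RESP_YES   - the contacted node is the master
 *   DLM_MASTER_RESP_NO    - definitely not the master
 *   DLM_MASTER_RESP_MAYBE - not master (yet), but interested
 *   DLM_MASTER_RESP_ERROR - transient failure, resend the request
 */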
static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response=0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	if (mle->type != DLM_MLE_MASTER) {
		request.namelen = mle->u.name.len;
		memcpy(request.name, mle->u.name.name, request.namelen);
	} else {
		request.namelen = mle->u.res->lockname.len;
		memcpy(request.name, mle->u.res->lockname.name,
		       request.namelen);
	}

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0)  {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!\n");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}
	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
		case DLM_MASTER_RESP_YES:
			set_bit(to, mle->response_map);
			mlog(0, "node %u is the master, response=YES\n", to);
			mle->master = to;
			break;
		case DLM_MASTER_RESP_NO:
			mlog(0, "node %u not master, response=NO\n", to);
			set_bit(to, mle->response_map);
			break;
		case DLM_MASTER_RESP_MAYBE:
			mlog(0, "node %u not master, response=MAYBE\n", to);
			set_bit(to, mle->response_map);
			set_bit(to, mle->maybe_map);
			break;
		case DLM_MASTER_RESP_ERROR:
			mlog(0, "node %u hit an error, resending\n", to);
			resend = 1;
			break;
		default:
			mlog(ML_ERROR, "bad response! %u\n", response);
			BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}
way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			spin_unlock(&res->spinlock);
			// mlog(0, "this node is the master\n");
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}
	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = (struct dlm_master_list_entry *)
				kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
		}
	}

	dlm_put(dlm);
	return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */

/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
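
/* note on the status convention used below: a positive EAGAIN from
 * the receiving node is not an error; it means that node saw mles
 * created on other nodes on our behalf and a re-assert is needed
 * (see the master_request path in dlm_assert_master_handler).  a
 * negative status from the remote node is treated as fatal. */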
static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
				unsigned int namelen, void *nodemap,
				u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error!\n");
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(ML_ERROR, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
		} else if (r < 0) {
			/* ok, something is horribly messed up.  kill thyself. */
			mlog(ML_ERROR,"during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		} else if (r == EAGAIN) {
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
	}

	if (reassert)
		goto again;

	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!\n");
		goto done;
	}
	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);
	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING)  {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "assert_master from "
				     "%u, but current owner is "
				     "%u! (%.*s)\n",
				     assert->node_idx, res->owner,
				     namelen, name);
				goto kill;
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}
ok:
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	// mlog(0, "woo!  got an assert_master from node %u!\n",
	// 	     assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
						    nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx)
					master_request = 1;
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else {
				dlm_change_lockres_owner(dlm, res, mle->master);
			}
			spin_unlock(&res->spinlock);
		}
		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->spinlock);
		spin_lock(&dlm->master_lock);

		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		list_del_init(&mle->list);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		/* the assert master message now balances the extra
		 * ref given by the master / migration request message.
		 * if this is the last put, it will be removed
		 * from the list. */
		spin_unlock(&dlm->master_lock);
		spin_unlock(&dlm->spinlock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}

done:
	ret = 0;
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		ret = EAGAIN;  // positive. negative would shoot down the node.
	}
	return ret;
kill:
	/* kill the caller! */
	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	__dlm_print_one_lock_resource(res);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
	dlm_lockres_put(res);
	dlm_put(dlm);
	return -EINVAL;
}
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	schedule_work(&dlm->dispatched_work);
	return 0;
}
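
/* the assert itself runs later, in dlm_assert_master_worker() below,
 * when the dlm work queue processes the item; the extra dlm ref taken
 * by dlm_grab() above keeps the domain alive until the item has run. */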
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if is this just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len,
				   nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		mlog_errno(ret);
	}

	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;
	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}

		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* check to see if this master is in the recovery map */
			spin_lock(&dlm->spinlock);
			if (test_bit(master, dlm->recovery_map)) {
				mlog(ML_NOTICE, "%s: node %u has not seen "
				     "node %u go down yet, and thinks the "
				     "dead node is mastering the recovery "
				     "lock.  must wait.\n", dlm->name,
				     nodenum, master);
				ret = -EAGAIN;
			}
			spin_unlock(&dlm->spinlock);
			mlog(0, "%s: reco lock master is %u\n", dlm->name,
			     master);
			break;
		}
	}
	return ret;
}
/*
 * DLM_MIGRATE_LOCKRES
 */
int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = -EINVAL;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	struct list_head *queue, *iter;
	int i;
	struct dlm_lock *lock;
	int empty = 1;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "migrating %.*s to %u\n", namelen, name, target);

	/*
	 * ensure this lockres is a proper candidate for migration
	 */
	spin_lock(&res->spinlock);
	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "cannot migrate lockres with unknown owner!\n");
		spin_unlock(&res->spinlock);
		goto leave;
	}
	if (res->owner != dlm->node_num) {
		mlog(0, "cannot migrate lockres this node doesn't own!\n");
		spin_unlock(&res->spinlock);
		goto leave;
	}
	mlog(0, "checking queues...\n");
	queue = &res->granted;
	for (i=0; i<3; i++) {
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			empty = 0;
			if (lock->ml.node == dlm->node_num) {
				mlog(0, "found a lock owned by this node "
				     "still on the %s queue!  will not "
				     "migrate this lockres\n",
				     i==0 ? "granted" :
				     (i==1 ? "converting" : "blocked"));
				spin_unlock(&res->spinlock);
				ret = -ENOTEMPTY;
				goto leave;
			}
		}
		queue++;
	}
	mlog(0, "all locks on this lockres are nonlocal.  continuing\n");
	spin_unlock(&res->spinlock);

	/* no work to do */
	if (empty) {
		mlog(0, "no locks were found on this lockres! done!\n");
		ret = 0;
		goto leave;
	}
	/*
	 * preallocate up front
	 * if this fails, abort
	 */

	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_KERNEL);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * find a node to migrate the lockres to
	 */

	mlog(0, "picking a migration node\n");
	spin_lock(&dlm->spinlock);
	/* pick a new node */
	if (!test_bit(target, dlm->domain_map) ||
	    target >= O2NM_MAX_NODES) {
		target = dlm_pick_migration_target(dlm, res);
	}
	mlog(0, "node %u chosen for migration\n", target);

	if (target >= O2NM_MAX_NODES ||
	    !test_bit(target, dlm->domain_map)) {
		/* target chosen is not alive */
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock(&dlm->spinlock);
		goto fail;
	}

	mlog(0, "continuing with target = %u\n", target);
	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}
fail:
	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
		} else if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto leave;
	}
	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */

	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 * also, make sure that all callers of dlm_get_mle
	 * take both dlm->spinlock and dlm->master_lock */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
		goto leave;
	}
	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */
	/* wait for new node to assert master */
	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
					(atomic_read(&mle->woken) == 1),
					msecs_to_jiffies(5000));

		if (ret >= 0) {
			if (atomic_read(&mle->woken) == 1 ||
			    res->owner == target)
				break;

			mlog(0, "timed out during migration\n");
			/* avoid hang during shutdown when migrating lockres
			 * to a node which also goes down */
			if (dlm_is_node_dead(dlm, target)) {
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				     dlm->name, res->lockname.len,
				     res->lockname.name, target);
				ret = -ERESTARTSYS;
			}
		}
		if (ret == -ERESTARTSYS) {
			/* migration failed, detach and clean up mle */
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			dlm_put_mle_inuse(mle);
			spin_lock(&res->spinlock);
			res->state &= ~DLM_LOCK_RES_MIGRATING;
			spin_unlock(&res->spinlock);
			goto leave;
		}
		/* TODO: if node died: stop, clean up, return error */
	}
	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, target);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	dlm_remove_nonlocal_locks(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	ret = 0;

	dlm_lockres_calc_usage(dlm, res);

leave:
	/* re-dirty the lockres if we failed */
	if (ret < 0)
		dlm_kick_thread(dlm, res);

	/* TODO: cleanup */
	if (mres)
		free_page((unsigned long)mres);

	dlm_put(dlm);

	mlog(0, "returning %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
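
/* Illustrative sketch, defined out like the debug helpers above: one
 * way a caller might drive migration.  The wrapper name and its error
 * policy are hypothetical; only dlm_pick_migration_target() (defined
 * later in this file) and dlm_migrate_lockres() are real. */
#if 0
static int example_migrate_one_lockres(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res)
{
	u8 target;
	int ret;

	/* dlm_pick_migration_target() asserts dlm->spinlock */
	spin_lock(&dlm->spinlock);
	target = dlm_pick_migration_target(dlm, res);
	spin_unlock(&dlm->spinlock);

	if (target == DLM_LOCK_RES_OWNER_UNKNOWN)
		return -ENOTEMPTY;	/* no other live node to take it */

	/* on failure the lockres has been re-dirtied, so it is
	 * safe for the caller to simply try again later */
	ret = dlm_migrate_lockres(dlm, res, target);
	if (ret < 0)
		mlog(0, "migration to node %u failed: %d\n", target, ret);
	return ret;
}
#endif
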
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;
	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}
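
/* Illustrative only: dlm_lock_basts_flushed() is written as a predicate
 * so that a caller can park on dlm->ast_wq until any queued bast for
 * the lock has been delivered, e.g.:
 *
 *	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
 */
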
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;
	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}

int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}

static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;

	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts.. hang out for a bit */
	dlm_kick_thread(dlm, res);
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
		   dlm_migration_can_proceed(dlm, res, target),
		   msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}

	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}

/* last step in the migration process.
 * original master calls this to free all of the dlm_lock
 * structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	struct list_head *iter, *iter2;
	struct list_head *queue = &res->granted;
	int i;
	struct dlm_lock *lock;

	assert_spin_locked(&res->spinlock);

	BUG_ON(res->owner == dlm->node_num);

	for (i=0; i<3; i++) {
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			if (lock->ml.node != dlm->node_num) {
				mlog(0, "putting lock for node %u\n",
				     lock->ml.node);
				/* be extra careful */
				BUG_ON(!list_empty(&lock->ast_list));
				BUG_ON(!list_empty(&lock->bast_list));
				BUG_ON(lock->ast_pending);
				BUG_ON(lock->bast_pending);
				list_del_init(&lock->list);
				dlm_lock_put(lock);
			}
		}
		/* advance through granted, converting, blocked */
		queue++;
	}
}

/* for now this is not too intelligent.  we will
 * need stats to make this do the right thing.
 * this just finds the first lock on one of the
 * queues and uses that node as the target. */
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue = &res->granted;
	struct list_head *iter;
	struct dlm_lock *lock;
	int nodenum;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	for (i=0; i<3; i++) {
		list_for_each(iter, queue) {
			/* up to the caller to make sure this node
			 * is alive */
			lock = list_entry (iter, struct dlm_lock, list);
			if (lock->ml.node != dlm->node_num) {
				spin_unlock(&res->spinlock);
				return lock->ml.node;
			}
		}
		queue++;
	}
	spin_unlock(&res->spinlock);
	mlog(0, "have not found a suitable target yet! checking domain map\n");

	/* ok now we're getting desperate.  pick anyone alive. */
	nodenum = -1;
	while (1) {
		nodenum = find_next_bit(dlm->domain_map,
					O2NM_MAX_NODES, nodenum+1);
		mlog(0, "found %d in domain map\n", nodenum);
		if (nodenum >= O2NM_MAX_NODES)
			break;
		if (nodenum != dlm->node_num) {
			mlog(0, "picking %d\n", nodenum);
			return nodenum;
		}
	}

	mlog(0, "giving up.  no master to migrate to\n");
	return DLM_LOCK_RES_OWNER_UNKNOWN;
}

/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0)
			mlog_errno(ret);
		else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}
/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * master list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);
	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	spin_lock(&dlm->master_lock);

	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

unlock:
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}
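
/* Illustrative only: like the other dlm network handlers, this one is
 * registered at domain init time (see dlmdomain.c); the call is
 * approximately:
 *
 *	o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
 *			       sizeof(struct dlm_migrate_request),
 *			       dlm_migrate_request_handler,
 *			       dlm, &dlm->dlm_domain_handlers);
 */
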
/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold to the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it from the list so that only one
			 * mle will be found */
			list_del_init(&tmp->list);
			/* detach the *old* mle, not the one being added,
			 * which has not been initialized yet */
			__dlm_mle_detach_hb_events(dlm, tmp);
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	list_add(&mle->list, &dlm->master_list);

	return ret;
}

void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_master_list_entry *mle;
	struct dlm_lock_resource *res;
	unsigned int hash;

	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	list_for_each_safe(iter, iter2, &dlm->master_list) {
		mle = list_entry(iter, struct dlm_master_list_entry, list);

		BUG_ON(mle->type != DLM_MLE_BLOCK &&
		       mle->type != DLM_MLE_MASTER &&
		       mle->type != DLM_MLE_MIGRATION);

		/* MASTER mles are initiated locally.  the waiting
		 * process will notice the node map change
		 * shortly.  let that happen as normal. */
		if (mle->type == DLM_MLE_MASTER)
			continue;

		/* BLOCK mles are initiated by other nodes.
		 * need to clean up if the dead node would have
		 * been the master. */
		if (mle->type == DLM_MLE_BLOCK) {
			int bit;

			spin_lock(&mle->spinlock);
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (bit != dead_node) {
				mlog(0, "mle found, but dead node %u would "
				     "not have been master\n", dead_node);
				spin_unlock(&mle->spinlock);
			} else {
				/* must drop the refcount by one since the
				 * assert_master will never arrive.  this
				 * may result in the mle being unlinked and
				 * freed, but there may still be a process
				 * waiting in the dlmlock path which is fine. */
				mlog(ML_ERROR, "node %u was expected master\n",
				     dead_node);
				atomic_set(&mle->woken, 1);
				spin_unlock(&mle->spinlock);
				wake_up(&mle->wq);
				/* do not need events any longer, so detach
				 * from heartbeat */
				__dlm_mle_detach_hb_events(dlm, mle);
				__dlm_put_mle(mle);
			}
			continue;
		}

		/* everything else is a MIGRATION mle */

		/* the rule for MIGRATION mles is that the master
		 * becomes UNKNOWN if *either* the original or
		 * the new master dies.  all UNKNOWN lockreses
		 * are sent to whichever node becomes the recovery
		 * master.  the new master is responsible for
		 * determining if there is still a master for
		 * this lockres, or if he needs to take over
		 * mastery.  either way, this node should expect
		 * another message to resolve this. */
		if (mle->master != dead_node &&
		    mle->new_master != dead_node)
			continue;

		/* if we have reached this point, this mle needs to
		 * be removed from the list and freed. */

		/* remove from the list early.  NOTE: unlinking
		 * list_head while in list_for_each_safe */
		__dlm_mle_detach_hb_events(dlm, mle);
		spin_lock(&mle->spinlock);
		list_del_init(&mle->list);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		mlog(0, "%s: node %u died during migration from "
		     "%u to %u!\n", dlm->name, dead_node,
		     mle->master, mle->new_master);
		/* if there is a lockres associated with this
		 * mle, find it and set its owner to UNKNOWN */
		hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
		res = __dlm_lookup_lockres(dlm, mle->u.name.name,
					   mle->u.name.len, hash);
		if (res) {
			/* unfortunately if we hit this rare case, our
			 * lock ordering is messed.  we need to drop
			 * the master lock so that we can take the
			 * lockres lock, meaning that we will have to
			 * restart from the head of list. */
			spin_unlock(&dlm->master_lock);

			/* move lockres onto recovery list */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res,
					      DLM_LOCK_RES_OWNER_UNKNOWN);
			dlm_move_lockres_to_recovery_list(dlm, res);
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);

			/* about to get rid of mle, detach from heartbeat */
			__dlm_mle_detach_hb_events(dlm, mle);

			/* dump the mle */
			spin_lock(&dlm->master_lock);
			__dlm_put_mle(mle);
			spin_unlock(&dlm->master_lock);

			/* restart */
			goto top;
		}

		/* this may be the last reference */
		__dlm_put_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}
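
/* Illustrative only: recovery invokes the cleanup above with
 * dlm->spinlock already held, roughly (see dlmrecovery.c):
 *
 *	spin_lock(&dlm->spinlock);
 *	...
 *	dlm_clean_master_list(dlm, dead_node);
 *	...
 *	spin_unlock(&dlm->spinlock);
 */
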
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}

/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */

/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}
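
/* Illustrative sketch, defined out: the reserve/release discipline the
 * two helpers above implement.  Any path that may later deliver an
 * ast/bast reserves one up front (under res->spinlock, while MIGRATING
 * is clear) and pairs it with a release; the final release of a
 * pending migration is what sets the MIGRATING flag and wakes the
 * waiter in dlm_mark_lockres_migrating(). */
#if 0
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* ... queue and deliver the ast or bast ... */

	/* drops the reservation; if it was the last one and a
	 * migration is pending, MIGRATING is set and anyone in
	 * wait_event on dlm->migration_wq is woken */
	dlm_lockres_release_ast(dlm, res);
#endif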