/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <cluster/heartbeat.h>
#include <cluster/nodemanager.h>
#include <cluster/tcp.h>

#include <dlm/dlmapi.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "extent_map.h"
#include "heartbeat.h"
#include "buffer_head_io.h"
struct ocfs2_mask_waiter {
	struct list_head	mw_item;
	int			mw_status;
	struct completion	mw_complete;
	unsigned long		mw_mask;
	unsigned long		mw_goal;
};

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock()
 */
enum ocfs2_unblock_action {
	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
				      * ->post_unlock callback */
	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
				      * ->post_unlock() callback. */
};
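/*
 * Editorial sketch, not part of the original file: a typical
 * ->downconvert_worker() does its blocking work and then returns one
 * of the actions above. The function name below is hypothetical;
 * ocfs2_data_convert_worker() in this file is the real example of
 * the pattern.
 */
static int example_downconvert_worker(struct ocfs2_lock_res *lockres,
				      int blocking)
{
	/* Do whatever work the other node needs finished before it
	 * can take its lock (e.g., push out dirty data)... */

	/* ...then let the downconvert proceed, with no
	 * ->post_unlock() callback requested. */
	return UNBLOCK_CONTINUE;
}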
struct ocfs2_unblock_ctl {
	int requeue;
	enum ocfs2_unblock_action unblock_action;
};

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);
#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	mlog(level, "LVB information for %s (called from %s:%u):\n",
	     lockres->l_name, function, line);
	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
	     be32_to_cpu(lvb->lvb_igeneration));
	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
	     be16_to_cpu(lvb->lvb_imode));
	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
	     be32_to_cpu(lvb->lvb_iattr));
}
/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
	/*
	 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
	 * this callback if ->l_priv is not an ocfs2_super pointer.
	 */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/*
	 * Optionally called in the downconvert (or "vote") thread
	 * after a successful downconvert. The lockres will not be
	 * referenced after this callback is called, so it is safe to
	 * free memory, etc.
	 *
	 * The exact semantics of when this is called are controlled
	 * by ->downconvert_worker().
	 */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/*
	 * Allow a lock type to add checks to determine whether it is
	 * safe to downconvert a lock. Return 0 to re-queue the
	 * downconvert at a later time, nonzero to continue.
	 *
	 * For most locks, the default checks that there are no
	 * incompatible holders are sufficient.
	 *
	 * Called with the lockres spinlock held.
	 */
	int (*check_downconvert)(struct ocfs2_lock_res *, int);

	/*
	 * Allows a lock type to populate the lock value block. This
	 * is called on downconvert, and when we drop a lock.
	 *
	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
	 * in the flags field.
	 *
	 * Called with the lockres spinlock held.
	 */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/*
	 * Called from the downconvert thread when it is determined
	 * that a lock will be downconverted. This is called without
	 * any locks held so the function can do work that might
	 * schedule (syncing out data, etc).
	 *
	 * This should return any one of the ocfs2_unblock_action
	 * values, depending on what it wants the thread to do.
	 */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int);

	/*
	 * LOCK_TYPE_* flags which describe the specific requirements
	 * of a lock type. Descriptions of each individual flag follow.
	 */
	int flags;
};
/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB 0x2
static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
	.get_osb	= ocfs2_get_inode_osb,
};

static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.check_downconvert = ocfs2_check_meta_downconvert,
	.set_lvb	= ocfs2_set_meta_lvb,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.downconvert_worker = ocfs2_data_convert_worker,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
	.get_osb	= ocfs2_get_dentry_osb,
	.post_unlock	= ocfs2_dentry_post_unlock,
	.downconvert_worker = ocfs2_dentry_convert_worker,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
	.get_osb	= ocfs2_get_inode_osb,
};
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
		lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!ocfs2_is_inode_lock(lockres));

	return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

	return (struct ocfs2_dentry_lock *)lockres->l_priv;
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
	if (lockres->l_ops->get_osb)
		return lockres->l_ops->get_osb(lockres);

	return (struct ocfs2_super *)lockres->l_priv;
}
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     int dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level);
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _stat, _lockres) do {	\
	mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on "	\
	     "resource %s: %s\n", dlm_errname(_stat), _func,	\
	     _lockres->l_name, dlm_errmsg(_stat));		\
} while (0)
static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres);
static int ocfs2_meta_lock_update(struct inode *inode,
				  struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int len;

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
		       (long long)blkno, generation);

	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);
}
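/*
 * Illustration (editorial, not from the original source): assuming
 * OCFS2_LOCK_ID_PAD is a run of six '0' characters and 'M' is the
 * type character for OCFS2_LOCK_TYPE_META, a metadata lock on block
 * 0x1d2 with generation 0x1 gets the name
 *
 *	M000000 00000000000001d2 00000001
 *
 * (spaces added here for readability): one type character, the pad,
 * sixteen hex digits of block number and eight of generation, for
 * OCFS2_LOCK_ID_MAX_LEN - 1 characters plus the terminating NUL.
 */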
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_dlm_tracking_lock);
	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_dlm_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}
static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       struct ocfs2_lock_res_ops *ops,
				       void *priv)
{
	res->l_type          = type;
	res->l_ops           = ops;
	res->l_priv          = priv;

	res->l_level         = LKM_IVMODE;
	res->l_requested     = LKM_IVMODE;
	res->l_blocking      = LKM_IVMODE;
	res->l_action        = OCFS2_AST_INVALID;
	res->l_unlock_action = OCFS2_UNLOCK_INVALID;

	res->l_flags         = OCFS2_LOCK_INITIALIZED;

	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
}

void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	/* This also clears out the lock status block */
	memset(res, 0, sizeof(struct ocfs2_lock_res));
	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;

	switch(type) {
	case OCFS2_LOCK_TYPE_RW:
		ops = &ocfs2_inode_rw_lops;
		break;
	case OCFS2_LOCK_TYPE_META:
		ops = &ocfs2_inode_meta_lops;
		break;
	case OCFS2_LOCK_TYPE_DATA:
		ops = &ocfs2_inode_data_lops;
		break;
	case OCFS2_LOCK_TYPE_OPEN:
		ops = &ocfs2_inode_open_lops;
		break;
	default:
		mlog_bug_on_msg(1, "type: %d\n", type);
		ops = NULL; /* thanks, gcc */
	}

	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
			      generation, res->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}

static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}
static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 inode_blkno_be;

	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(__be64));

	return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}
void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	int len;
	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * Unfortunately, the standard lock naming scheme won't work
	 * here because we have two 16 byte values to use. Instead,
	 * we'll stuff the inode number as a binary value. We still
	 * want error prints to show something without garbling the
	 * display, so drop a null byte in there before the inode
	 * number. A future version of OCFS2 will likely use all
	 * binary lock names. The stringified names have been a
	 * tremendous aid in debugging, but now that the debugfs
	 * interface exists, we can mangle things there if need be.
	 *
	 * NOTE: We also drop the standard "pad" value (the total lock
	 * name size stays the same though; the last part is all
	 * zeros due to the memset in ocfs2_lock_res_init_once()).
	 */
	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
		       "%c%016llx",
		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
		       (long long)parent);

	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
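	/*
	 * Illustrative layout (editorial note): assuming 'N' is the
	 * type character for OCFS2_LOCK_TYPE_DENTRY, the finished
	 * name is
	 *
	 *	'N' <16 hex digits of parent blkno> '\0' <8 raw bytes of inode blkno>
	 *
	 * where the NUL comes from snprintf()'s terminator and keeps
	 * printk-style dumps of l_name from running into the binary
	 * part.
	 */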
	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
	       sizeof(__be64));

	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
				   dl);
}

static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			      0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
				   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
				   &ocfs2_rename_lops, osb);
}
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	ocfs2_remove_lockres_tracking(res);

	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
			"Lockres %s is on the blocked list\n",
			res->l_name);
	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
			"Lockres %s has mask waiters pending\n",
			res->l_name);
	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
			"Lockres %s is locked\n",
			res->l_name);
	mlog_bug_on_msg(res->l_ro_holders,
			"Lockres %s has %u ro holders\n",
			res->l_name, res->l_ro_holders);
	mlog_bug_on_msg(res->l_ex_holders,
			"Lockres %s has %u ex holders\n",
			res->l_name, res->l_ex_holders);

	/* Need to clear out the lock status block for the dlm */
	memset(&res->l_lksb, 0, sizeof(res->l_lksb));
}
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	switch(level) {
	case LKM_EXMODE:
		lockres->l_ex_holders++;
		break;
	case LKM_PRMODE:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}
}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	switch(level) {
	case LKM_EXMODE:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case LKM_PRMODE:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
}
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
	int new_level = LKM_EXMODE;

	if (level == LKM_EXMODE)
		new_level = LKM_NLMODE;
	else if (level == LKM_PRMODE)
		new_level = LKM_PRMODE;

	return new_level;
}
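/*
 * Worked example (editorial note, not in the original): a remote EX
 * holder leaves NL as the highest level we can hold, a PR holder
 * still allows us PR, and only an NL holder is compatible with EX.
 * ocfs2_generic_handle_downconvert_action() and
 * ocfs2_may_continue_on_blocked_lock() below both lean on this
 * mapping.
 */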
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct list_head *pos, *tmp;
	struct ocfs2_mask_waiter *mw;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_safe(pos, tmp, &lockres->l_mask_waiters) {
		mw = list_entry(pos, struct ocfs2_mask_waiter, mw_item);
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			continue;

		list_del_init(&mw->mw_item);
		mw->mw_status = 0;
		complete(&mw->mw_complete);
	}
}

static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}

static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
	BUG_ON(lockres->l_blocking <= LKM_NLMODE);

	lockres->l_level = lockres->l_requested;
	if (lockres->l_level <=
	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
		lockres->l_blocking = LKM_NLMODE;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
	}
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date. Convert from NL to
	 * *anything* however should mark ourselves as needing an
	 * update. */
	if (lockres->l_level == LKM_NLMODE &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);

	if (lockres->l_requested > LKM_NLMODE &&
	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
				     int level)
{
	int needs_downconvert = 0;

	assert_spin_locked(&lockres->l_lock);

	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);

	if (level > lockres->l_blocking) {
		/* only schedule a downconvert if we haven't already scheduled
		 * one that goes low enough to satisfy the level we're
		 * blocking. this also catches the case where we get
		 * duplicate requests. */
		if (ocfs2_highest_compat_lock_level(level) <
		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
			needs_downconvert = 1;

		lockres->l_blocking = level;
	}

	mlog_exit(needs_downconvert);
	return needs_downconvert;
}
static void ocfs2_blocking_ast(void *opaque, int level)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	int needs_downconvert;
	unsigned long flags;

	BUG_ON(level <= LKM_NLMODE);

	mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
	     lockres->l_name, level, lockres->l_level,
	     ocfs2_lock_type_string(lockres->l_type));

	spin_lock_irqsave(&lockres->l_lock, flags);
	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
	if (needs_downconvert)
		ocfs2_schedule_blocked_lock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);

	ocfs2_kick_vote_thread(osb);
}
static void ocfs2_locking_ast(void *opaque)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct dlm_lockstatus *lksb = &lockres->l_lksb;
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);

	if (lksb->status != DLM_NORMAL) {
		mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
		     lockres->l_name, lksb->status);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	switch(lockres->l_action) {
	case OCFS2_AST_ATTACH:
		ocfs2_generic_handle_attach_action(lockres);
		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
		break;
	case OCFS2_AST_CONVERT:
		ocfs2_generic_handle_convert_action(lockres);
		break;
	case OCFS2_AST_DOWNCONVERT:
		ocfs2_generic_handle_downconvert_action(lockres);
		break;
	default:
		mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
		     "lockres flags = 0x%lx, unlock action: %u\n",
		     lockres->l_name, lockres->l_action, lockres->l_flags,
		     lockres->l_unlock_action);
		BUG();
	}

	/* set it to something invalid so if we get called again we
	 * can catch it. */
	lockres->l_action = OCFS2_AST_INVALID;

	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	if (convert)
		lockres->l_action = OCFS2_AST_INVALID;
	else
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}
/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     int dlm_flags)
{
	int ret = 0;
	enum dlm_status status = DLM_NORMAL;
	unsigned long flags;

	mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
	     dlm_flags);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	lockres->l_action = OCFS2_AST_ATTACH;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = dlmlock(osb->dlm,
			 level,
			 &lockres->l_lksb,
			 dlm_flags,
			 lockres->l_name,
			 OCFS2_LOCK_ID_MAX_LEN - 1,
			 ocfs2_locking_ast,
			 lockres,
			 ocfs2_blocking_ast);
	if (status != DLM_NORMAL) {
		ocfs2_log_dlm_error("dlmlock", status, lockres);
		ret = -EINVAL;
		ocfs2_recover_from_dlm_error(lockres, 1);
	}

	mlog(0, "lock %s, successful return from dlmlock\n", lockres->l_name);

bail:
	return ret;
}
static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = lockres->l_flags & flag;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}
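/*
 * Example (editorial note): if a remote node has requested EX,
 * l_blocking is EX and the highest compatible local level is NL, so
 * only a wanted level of NL may proceed here; PR and EX requests
 * must wait until the downconvert has finished.
 */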
static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);
	return mw->mw_status;
}

static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
	mw->mw_mask = mask;
	mw->mw_goal = goal;
}

/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!list_empty(&mw->mw_item)) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			ret = -EBUSY;

		list_del_init(&mw->mw_item);
		init_completion(&mw->mw_complete);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}
static int ocfs2_cluster_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      int level,
			      int lkm_flags,
			      int arg_flags)
{
	struct ocfs2_mask_waiter mw;
	enum dlm_status status;
	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
	unsigned long flags;

	ocfs2_init_mask_waiter(&mw);

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= LKM_VALBLK;

again:
	wait = 0;

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);

	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
			"Cluster lock called on freeing lockres %s! flags "
			"0x%lx\n", lockres->l_name, lockres->l_flags);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
	    level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		/* lock has not been created yet. */
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		goto again;
	}

	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* is the lock currently blocked on behalf of
		 * another node? */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
		wait = 1;
		goto unlock;
	}

	if (level > lockres->l_level) {
		if (lockres->l_action != OCFS2_AST_INVALID)
			mlog(ML_ERROR, "lockres %s has action %u pending\n",
			     lockres->l_name, lockres->l_action);

		lockres->l_action = OCFS2_AST_CONVERT;
		lockres->l_requested = level;
		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		BUG_ON(level == LKM_IVMODE);
		BUG_ON(level == LKM_NLMODE);

		mlog(0, "lock %s, convert from %d to level = %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		status = dlmlock(osb->dlm,
				 level,
				 &lockres->l_lksb,
				 lkm_flags|LKM_CONVERT,
				 lockres->l_name,
				 OCFS2_LOCK_ID_MAX_LEN - 1,
				 ocfs2_locking_ast,
				 lockres,
				 ocfs2_blocking_ast);
		if (status != DLM_NORMAL) {
			if ((lkm_flags & LKM_NOQUEUE) &&
			    (status == DLM_NOTQUEUED))
				ret = -EAGAIN;
			else {
				ocfs2_log_dlm_error("dlmlock", status,
						    lockres);
				ret = -EINVAL;
			}
			ocfs2_recover_from_dlm_error(lockres, 1);
			goto out;
		}

		mlog(0, "lock %s, successful return from dlmlock\n",
		     lockres->l_name);

		/* At this point we've gone inside the dlm and need to
		 * complete our work regardless. */
		catch_signals = 0;

		/* wait for busy to clear and carry on */
		goto again;
	}

	/* Ok, if we get here then we're good to go. */
	ocfs2_inc_holders(lockres, level);

	ret = 0;
unlock:
	spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
	/*
	 * This is helping work around a lock inversion between the page lock
	 * and dlm locks. One path holds the page lock while calling aops
	 * which block acquiring dlm locks. The voting thread holds dlm
	 * locks while acquiring page locks when down-converting data locks.
	 * This block is helping an aop path notice the inversion and back
	 * off to unlock its page lock before trying the dlm lock again.
	 */
	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
		wait = 0;
		if (lockres_remove_mask_waiter(lockres, &mw))
			ret = -EAGAIN;
		else
			goto again;
	}
	if (wait) {
		ret = ocfs2_wait_for_mask(&mw);
		if (ret == 0)
			goto again;
		mlog_errno(ret);
	}

	return ret;
}
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ocfs2_dec_holders(lockres, level);
	ocfs2_vote_on_unlock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int ex,
				 int local)
{
	int level = ex ? LKM_EXMODE : LKM_PRMODE;
	unsigned long flags;
	int lkm_flags = local ? LKM_LOCAL : 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}
/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!ocfs2_inode_is_new(inode));

	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

	/* NOTE: That we don't increment any of the holder counts, nor
	 * do we add anything to a journal handle. Since this is
	 * supposed to be a new inode which the cluster doesn't know
	 * about yet, there is no need to. As far as the LVB handling
	 * is concerned, this is basically like acquiring an EX lock
	 * on a resource which has an invalid one -- we'll set it
	 * valid when we release the EX. */

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
	if (ret)
		goto bail;

	/*
	 * We don't want to use LKM_LOCAL on a meta data lock as they
	 * don't use a generation in their lock names.
	 */
	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_meta_lockres, 1, 0);
	if (ret)
		goto bail;

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_data_lockres, 1, 1);
	if (ret)
		goto bail;

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);

bail:
	return ret;
}
int ocfs2_rw_lock(struct inode *inode, int write)
{
	int status, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu take %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_rw_lockres;

	level = write ? LKM_EXMODE : LKM_PRMODE;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
				    0);
	if (status < 0)
		mlog_errno(status);

	return status;
}

void ocfs2_rw_unlock(struct inode *inode, int write)
{
	int level = write ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
/*
 * ocfs2_open_lock always gets a PR mode lock.
 */
int ocfs2_open_lock(struct inode *inode)
{
	int status = 0;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu take PRMODE open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    LKM_PRMODE, 0, 0);
	if (status < 0)
		mlog_errno(status);

out:
	return status;
}

int ocfs2_try_open_lock(struct inode *inode, int write)
{
	int status = 0, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu try to take %s open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	level = write ? LKM_EXMODE : LKM_PRMODE;

	/*
	 * The file system may already be holding a PRMODE/EXMODE open lock.
	 * Since we pass LKM_NOQUEUE, the request won't block waiting on
	 * other nodes and the -EAGAIN will indicate to the caller that
	 * this inode is still in use.
	 */
	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    level, LKM_NOQUEUE, 0);

out:
	return status;
}

/*
 * ocfs2_open_unlock unlocks PR and EX mode open locks.
 */
void ocfs2_open_unlock(struct inode *inode)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	if (lockres->l_ro_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     LKM_PRMODE);
	if (lockres->l_ex_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     LKM_EXMODE);

out:
	return;
}
int ocfs2_data_lock_full(struct inode *inode,
			 int write,
			 int arg_flags)
{
	int status = 0, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu take %s DATA lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	/* We'll allow faking a readonly data lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) {
		if (write)
			status = -EROFS;
		goto out;
	}

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_data_lockres;

	level = write ? LKM_EXMODE : LKM_PRMODE;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level,
				    arg_flags, 0);
	if (status < 0 && status != -EAGAIN)
		mlog_errno(status);

out:
	return status;
}

/* see ocfs2_meta_lock_with_page() */
int ocfs2_data_lock_with_page(struct inode *inode,
			      int write,
			      struct page *page)
{
	int ret;

	ret = ocfs2_data_lock_full(inode, write, OCFS2_LOCK_NONBLOCK);
	if (ret == -EAGAIN) {
		unlock_page(page);
		if (ocfs2_data_lock(inode, write) == 0)
			ocfs2_data_unlock(inode, write);
		ret = AOP_TRUNCATED_PAGE;
	}

	return ret;
}
static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres)
{
	int kick = 0;

	/* If we know that another node is waiting on our lock, kick
	 * the vote thread pre-emptively when we reach a release
	 * condition. */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
		switch(lockres->l_blocking) {
		case LKM_EXMODE:
			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
				kick = 1;
			break;
		case LKM_PRMODE:
			if (!lockres->l_ex_holders)
				kick = 1;
			break;
		default:
			BUG();
		}
	}

	if (kick)
		ocfs2_kick_vote_thread(osb);
}
void ocfs2_data_unlock(struct inode *inode,
		       int write)
{
	int level = write ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop %s DATA lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
	    !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
#define OCFS2_SEC_BITS   34
#define OCFS2_SEC_SHIFT  (64 - OCFS2_SEC_BITS)
#define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)

/* LVB only has room for 64 bits of time here so we pack it for
 * ourselves: */
static u64 ocfs2_pack_timespec(struct timespec *spec)
{
	u64 res;
	u64 sec = spec->tv_sec;
	u32 nsec = spec->tv_nsec;

	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);

	return res;
}
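/*
 * Worked example (editorial note, not in the original): with
 * OCFS2_SEC_SHIFT == 30, packing { .tv_sec = 5, .tv_nsec = 7 }
 * yields (5 << 30) | 7 == 0x140000007ULL. The seconds take the upper
 * 34 bits and the nanoseconds (always < 10^9 < 2^30) the lower 30,
 * so ocfs2_unpack_timespec() below recovers both fields exactly.
 */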
/* Call this with the lockres locked. I am reasonably sure we don't
 * need ip_lock in this function as anyone who would be changing those
 * values is supposed to be blocked in ocfs2_meta_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
	struct ocfs2_meta_lvb *lvb;

	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	/*
	 * Invalidate the LVB of a deleted inode - this way other
	 * nodes are forced to go to disk and discover the new inode
	 * generation.
	 */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		lvb->lvb_version = 0;
		goto out;
	}

	lvb->lvb_version   = OCFS2_LVB_VERSION;
	lvb->lvb_isize	   = cpu_to_be64(i_size_read(inode));
	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
	lvb->lvb_iuid      = cpu_to_be32(inode->i_uid);
	lvb->lvb_igid      = cpu_to_be32(inode->i_gid);
	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
	lvb->lvb_iatime_packed  =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
	lvb->lvb_ictime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
	lvb->lvb_imtime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
	lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);

out:
	mlog_meta_lvb(0, lockres);
}
static void ocfs2_unpack_timespec(struct timespec *spec,
				  u64 packed_time)
{
	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}

static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
	struct ocfs2_meta_lvb *lvb;

	mlog_meta_lvb(0, lockres);

	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	/* We're safe here without the lockres lock... */
	spin_lock(&oi->ip_lock);
	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));

	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
	ocfs2_set_inode_flags(inode);

	/* fast-symlinks are a special case */
	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
		inode->i_blocks = 0;
	else
		inode->i_blocks = ocfs2_inode_sector_count(inode);

	inode->i_uid     = be32_to_cpu(lvb->lvb_iuid);
	inode->i_gid     = be32_to_cpu(lvb->lvb_igid);
	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
	inode->i_nlink   = be16_to_cpu(lvb->lvb_inlink);
	ocfs2_unpack_timespec(&inode->i_atime,
			      be64_to_cpu(lvb->lvb_iatime_packed));
	ocfs2_unpack_timespec(&inode->i_mtime,
			      be64_to_cpu(lvb->lvb_imtime_packed));
	ocfs2_unpack_timespec(&inode->i_ctime,
			      be64_to_cpu(lvb->lvb_ictime_packed));
	spin_unlock(&oi->ip_lock);
}
static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
					      struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	if (lvb->lvb_version == OCFS2_LVB_VERSION
	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
		return 1;
	return 0;
}

/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 *   0 means no refresh needed.
 *
 *   > 0 means you need to refresh this and you MUST call
 *   ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int status = 0;

refresh_check:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_on_refreshing_lock(lockres);
		goto refresh_check;
	}

	/* Ok, I'll be the one to refresh this lock. */
	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = 1;
bail:
	return status;
}

/* If status is non-zero, I'll mark it as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
						   int status)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
	if (!status)
		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}
/* may or may not return a bh if it went to disk. */
static int ocfs2_meta_lock_update(struct inode *inode,
				  struct buffer_head **bh)
{
	int status = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
	struct ocfs2_dinode *fe;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_mount_local(osb))
		goto bail;

	spin_lock(&oi->ip_lock);
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		mlog(0, "Orphaned inode %llu was deleted while we "
		     "were waiting on a lock. ip_flags = 0x%x\n",
		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
		spin_unlock(&oi->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&oi->ip_lock);

	if (!ocfs2_should_refresh_lock_res(lockres))
		goto bail;

	/* This will discard any caching information we might have had
	 * for the inode metadata. */
	ocfs2_metadata_cache_purge(inode);

	ocfs2_extent_map_trunc(inode, 0);

	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
		mlog(0, "Trusting LVB on inode %llu\n",
		     (unsigned long long)oi->ip_blkno);
		ocfs2_refresh_inode_from_lvb(inode);
	} else {
		/* Boo, we have to go to disk. */
		/* read bh, cast, ocfs2_refresh_inode */
		status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
					  bh, OCFS2_BH_CACHED, inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail_refresh;
		}
		fe = (struct ocfs2_dinode *) (*bh)->b_data;

		/* This is a good chance to make sure we're not
		 * locking an invalid object.
		 *
		 * We bug on a stale inode here because we checked
		 * above whether it was wiped from disk. The wiping
		 * node provides a guarantee that we receive that
		 * message and can mark the inode before dropping any
		 * locks associated with it. */
		if (!OCFS2_IS_VALID_DINODE(fe)) {
			OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
			status = -EIO;
			goto bail_refresh;
		}
		mlog_bug_on_msg(inode->i_generation !=
				le32_to_cpu(fe->i_generation),
				"Invalid dinode %llu disk generation: %u "
				"inode->i_generation: %u\n",
				(unsigned long long)oi->ip_blkno,
				le32_to_cpu(fe->i_generation),
				inode->i_generation);
		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
				(unsigned long long)oi->ip_blkno,
				(unsigned long long)le64_to_cpu(fe->i_dtime),
				le32_to_cpu(fe->i_flags));

		ocfs2_refresh_inode(inode, fe);
	}

	status = 0;
bail_refresh:
	ocfs2_complete_lock_res_refresh(lockres, status);
bail:
	return status;
}
static int ocfs2_assign_bh(struct inode *inode,
			   struct buffer_head **ret_bh,
			   struct buffer_head *passed_bh)
{
	int status;

	if (passed_bh) {
		/* Ok, the update went to disk for us, use the
		 * returned bh. */
		*ret_bh = passed_bh;
		get_bh(*ret_bh);

		return 0;
	}

	status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				  OCFS2_I(inode)->ip_blkno,
				  ret_bh, OCFS2_BH_CACHED, inode);
	if (status < 0)
		mlog_errno(status);

	return status;
}
/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
int ocfs2_meta_lock_full(struct inode *inode,
			 struct buffer_head **ret_bh,
			 int ex,
			 int arg_flags)
{
	int status, level, dlm_flags, acquired;
	struct ocfs2_lock_res *lockres = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *local_bh = NULL;

	mlog(0, "inode %llu, take %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	status = 0;
	acquired = 0;
	/* We'll allow faking a readonly metadata lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(osb)) {
		if (ex)
			status = -EROFS;
		goto bail;
	}

	if (ocfs2_mount_local(osb))
		goto local;

	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		wait_event(osb->recovery_event,
			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));

	lockres = &OCFS2_I(inode)->ip_meta_lockres;
	level = ex ? LKM_EXMODE : LKM_PRMODE;
	dlm_flags = 0;
	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
		dlm_flags |= LKM_NOQUEUE;

	status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
	if (status < 0) {
		if (status != -EAGAIN && status != -EIOCBRETRY)
			mlog_errno(status);
		goto bail;
	}

	/* Notify the error cleanup path to drop the cluster lock. */
	acquired = 1;

	/* We wait twice because a node may have died while we were in
	 * the lower dlm layers. The second time though, we've
	 * committed to owning this lock so we don't allow signals to
	 * abort the operation. */
	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		wait_event(osb->recovery_event,
			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));

local:
	/*
	 * We only see this flag if we're being called from
	 * ocfs2_read_locked_inode(). It means we're locking an inode
	 * which hasn't been populated yet, so clear the refresh flag
	 * and let the caller handle it.
	 */
	if (inode->i_state & I_NEW) {
		status = 0;
		if (lockres)
			ocfs2_complete_lock_res_refresh(lockres, 0);
		goto bail;
	}

	/* This is fun. The caller may want a bh back, or it may
	 * not. ocfs2_meta_lock_update definitely wants one in, but
	 * may or may not read one, depending on what's in the
	 * LVB. The result of all of this is that we've *only* gone to
	 * disk if we have to, so the complexity is worthwhile. */
	status = ocfs2_meta_lock_update(inode, &local_bh);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}

	if (ret_bh) {
		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

bail:
	if (status < 0) {
		if (ret_bh && (*ret_bh)) {
			brelse(*ret_bh);
			*ret_bh = NULL;
		}
		if (acquired)
			ocfs2_meta_unlock(inode, ex);
	}

	if (local_bh)
		brelse(local_bh);

	return status;
}
/*
 * This is working around a lock inversion between tasks acquiring DLM
 * locks while holding a page lock and the vote thread which blocks dlm
 * lock acquisition while acquiring page locks.
 *
 * ** These _with_page variants are only intended to be called from aop
 * methods that hold page locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have blocked
 * waiting for the vote thread. In that case we unlock our page so the vote
 * thread can make progress. Once we've done this we have to return
 * AOP_TRUNCATED_PAGE so the aop method that called us can bubble that back up
 * into the VFS who will then immediately retry the aop call.
 *
 * We do a blocking lock and immediate unlock before returning, though, so that
 * the lock has a great chance of being cached on this node by the time the VFS
 * calls back to retry the aop. This has a potential to livelock as nodes
 * ping locks back and forth, but that's a risk we're willing to take in
 * exchange for avoiding the lock inversion so simply.
 */
int ocfs2_meta_lock_with_page(struct inode *inode,
			      struct buffer_head **ret_bh,
			      int ex,
			      struct page *page)
{
	int ret;

	ret = ocfs2_meta_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
	if (ret == -EAGAIN) {
		unlock_page(page);
		if (ocfs2_meta_lock(inode, ret_bh, ex) == 0)
			ocfs2_meta_unlock(inode, ex);
		ret = AOP_TRUNCATED_PAGE;
	}

	return ret;
}
int ocfs2_meta_lock_atime(struct inode *inode,
			  struct vfsmount *vfsmnt,
			  int *level)
{
	int ret;

	ret = ocfs2_meta_lock(inode, NULL, 0);
	if (ret < 0)
		return ret;

	/*
	 * If we should update atime, we will take an EX lock,
	 * otherwise we just take a PR lock.
	 */
	if (ocfs2_should_update_atime(inode, vfsmnt)) {
		struct buffer_head *bh = NULL;

		ocfs2_meta_unlock(inode, 0);
		ret = ocfs2_meta_lock(inode, &bh, 1);
		if (ret < 0)
			return ret;

		*level = 1;
		if (ocfs2_should_update_atime(inode, vfsmnt))
			ocfs2_update_inode_atime(inode, bh);
		if (bh)
			brelse(bh);
	} else
		*level = 0;

	return ret;
}
void ocfs2_meta_unlock(struct inode *inode,
		       int ex)
{
	int level = ex ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
	    !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
int ocfs2_super_lock(struct ocfs2_super *osb,
		     int ex)
{
	int status = 0;
	int level = ex ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
	struct buffer_head *bh;
	struct ocfs2_slot_info *si = osb->slot_info;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		goto bail;

	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* The super block lock path is really in the best position to
	 * know when resources covered by the lock need to be
	 * refreshed, so we do it here. Of course, making sense of
	 * everything is up to the caller :) */
	status = ocfs2_should_refresh_lock_res(lockres);
	if (status) {
		bh = si->si_bh;
		status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
					  si->si_inode);
		if (status == 0)
			ocfs2_update_slot_info(si);

		ocfs2_complete_lock_res_refresh(lockres, status);
	}
bail:
	return status;
}

void ocfs2_super_unlock(struct ocfs2_super *osb,
			int ex)
{
	int level = ex ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}
int ocfs2_rename_lock(struct ocfs2_super *osb)
{
	int status;
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
	if (status < 0)
		mlog_errno(status);

	return status;
}

void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
}

int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
	int ret;
	int level = ex ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
	if (ret < 0)
		mlog_errno(ret);

	return ret;
}

void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
	int level = ex ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
/* Reference counting of the dlm debug structure. We want this because
 * open references on the debug inodes can live on after a mount, so
 * we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);

	kfree(dlm_debug);
}

void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{
	if (dlm_debug)
		kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
}

static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{
	kref_get(&debug->d_refcnt);
}

struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
	if (!dlm_debug) {
		mlog_errno(-ENOMEM);
		goto out;
	}

	kref_init(&dlm_debug->d_refcnt);
	INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
	dlm_debug->d_locking_state = NULL;
out:
	return dlm_debug;
}
/* Access to this is arbitrated for us via seq_file->sem. */
struct ocfs2_dlm_seq_priv {
	struct ocfs2_dlm_debug *p_dlm_debug;
	struct ocfs2_lock_res p_iter_res;
	struct ocfs2_lock_res p_tmp_res;
};

static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
						 struct ocfs2_dlm_seq_priv *priv)
{
	struct ocfs2_lock_res *iter, *ret = NULL;
	struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;

	assert_spin_locked(&ocfs2_dlm_tracking_lock);

	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
		/* discover the head of the list */
		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
			mlog(0, "End of list found, %p\n", ret);
			break;
		}

		/* We track our "dummy" iteration lockres' by a NULL
		 * l_ops field. */
		if (iter->l_ops != NULL) {
			ret = iter;
			break;
		}
	}

	return ret;
}
static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
	if (iter) {
		/* Since lockres' have the lifetime of their container
		 * (which can be inodes, ocfs2_supers, etc) we want to
		 * copy this out to a temporary lockres while still
		 * under the spinlock. Obviously after this we can't
		 * trust any pointers on the copy returned, but that's
		 * ok as the information we want isn't typically held
		 * in them. */
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_dlm_tracking_lock);

	return iter;
}

static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
{
}

static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter = v;
	struct ocfs2_lock_res *dummy = &priv->p_iter_res;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(iter, priv);
	list_del_init(&dummy->l_debug_list);
	if (iter) {
		list_add(&dummy->l_debug_list, &iter->l_debug_list);
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_dlm_tracking_lock);

	return iter;
}
/* So that debugfs.ocfs2 can determine which format is being used */
#define OCFS2_DLM_DEBUG_STR_VERSION 1
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
	int i;
	char *lvb;
	struct ocfs2_lock_res *lockres = v;

	if (!lockres)
		return -EINVAL;

	seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);

	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
		seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
			   lockres->l_name,
			   (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
	else
		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);

	seq_printf(m, "%d\t"
		   lockres->l_unlock_action,
		   lockres->l_ro_holders,
		   lockres->l_ex_holders,
		   lockres->l_requested,
		   lockres->l_blocking);

	/* Dump the raw LVB */
	lvb = lockres->l_lksb.lvb;
	for (i = 0; i < DLM_LVB_LEN; i++)
		seq_printf(m, "0x%x\t", lvb[i]);

	/* End the line */
	seq_printf(m, "\n");
	return 0;
}
static struct seq_operations ocfs2_dlm_seq_ops = {
	.start =	ocfs2_dlm_seq_start,
	.stop =		ocfs2_dlm_seq_stop,
	.next =		ocfs2_dlm_seq_next,
	.show =		ocfs2_dlm_seq_show,
};

static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = (struct seq_file *) file->private_data;
	struct ocfs2_dlm_seq_priv *priv = seq->private;
	struct ocfs2_lock_res *res = &priv->p_iter_res;

	ocfs2_remove_lockres_tracking(res);
	ocfs2_put_dlm_debug(priv->p_dlm_debug);
	return seq_release_private(inode, file);
}
static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
{
	int ret;
	struct ocfs2_dlm_seq_priv *priv;
	struct seq_file *seq;
	struct ocfs2_super *osb;

	priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	osb = inode->i_private;
	ocfs2_get_dlm_debug(osb->osb_dlm_debug);
	priv->p_dlm_debug = osb->osb_dlm_debug;
	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);

	ret = seq_open(file, &ocfs2_dlm_seq_ops);
	if (ret) {
		ocfs2_put_dlm_debug(priv->p_dlm_debug);
		kfree(priv);
		goto out;
	}

	seq = (struct seq_file *) file->private_data;
	seq->private = priv;

	ocfs2_add_lockres_tracking(&priv->p_iter_res,
				   priv->p_dlm_debug);

out:
	return ret;
}

static const struct file_operations ocfs2_dlm_debug_fops = {
	.open =		ocfs2_dlm_debug_open,
	.release =	ocfs2_dlm_debug_release,
	.read =		seq_read,
	.llseek =	seq_lseek,
};
static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
	int ret = 0;
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	dlm_debug->d_locking_state = debugfs_create_file("locking_state",
							 S_IFREG|S_IRUSR,
							 osb->osb_debug_root,
							 osb,
							 &ocfs2_dlm_debug_fops);
	if (!dlm_debug->d_locking_state) {
		ret = -EINVAL;
		mlog(ML_ERROR,
		     "Unable to create locking state debugfs file.\n");
		goto out;
	}

	ocfs2_get_dlm_debug(dlm_debug);
out:
	return ret;
}

static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	if (dlm_debug) {
		debugfs_remove(dlm_debug->d_locking_state);
		ocfs2_put_dlm_debug(dlm_debug);
	}
}
int ocfs2_dlm_init(struct ocfs2_super *osb)
{
	int status;
	u32 dlm_key;
	struct dlm_ctxt *dlm = NULL;

	if (ocfs2_mount_local(osb))
		goto local;

	status = ocfs2_dlm_init_debug(osb);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* launch vote thread */
	osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
	if (IS_ERR(osb->vote_task)) {
		status = PTR_ERR(osb->vote_task);
		osb->vote_task = NULL;
		mlog_errno(status);
		goto bail;
	}

	/* used by the dlm code to make message headers unique, each
	 * node in this domain must agree on this. */
	dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));

	/* for now, uuid == domain */
	dlm = dlm_register_domain(osb->uuid_str, dlm_key);
	if (IS_ERR(dlm)) {
		status = PTR_ERR(dlm);
		mlog_errno(status);
		goto bail;
	}

	dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);

local:
	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);

	osb->dlm = dlm;

	status = 0;
bail:
	if (status < 0) {
		ocfs2_dlm_shutdown_debug(osb);
		if (osb->vote_task)
			kthread_stop(osb->vote_task);
	}

	return status;
}
2370 void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2374 dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
2376 ocfs2_drop_osb_locks(osb);
2378 if (osb->vote_task) {
2379 kthread_stop(osb->vote_task);
2380 osb->vote_task = NULL;
2383 ocfs2_lock_res_free(&osb->osb_super_lockres);
2384 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2386 dlm_unregister_domain(osb->dlm);
2387 osb->dlm = NULL;
2389 ocfs2_dlm_shutdown_debug(osb);
2394 static void ocfs2_unlock_ast(void *opaque, enum dlm_status status)
2396 struct ocfs2_lock_res *lockres = opaque;
2397 unsigned long flags;
2401 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2402 lockres->l_unlock_action);
2404 spin_lock_irqsave(&lockres->l_lock, flags);
2405 /* We tried to cancel a convert request, but it was already
2406 * granted. All we want to do here is clear our unlock
2407 * state. The wake_up call done at the bottom is redundant
2408 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2409 * hurt anything anyway */
2410 if (status == DLM_CANCELGRANT &&
2411 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2412 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2414 /* We don't clear the busy flag in this case as it
2415 * should have been cleared by the ast which the dlm
2416 * sent us. */
2417 goto complete_unlock;
2418 }
2420 if (status != DLM_NORMAL) {
2421 mlog(ML_ERROR, "Dlm returned status %d for lock %s, "
2422 "unlock_action %d\n", status, lockres->l_name,
2423 lockres->l_unlock_action);
2424 spin_unlock_irqrestore(&lockres->l_lock, flags);
2425 return;
2426 }
2428 switch (lockres->l_unlock_action) {
2429 case OCFS2_UNLOCK_CANCEL_CONVERT:
2430 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2431 lockres->l_action = OCFS2_AST_INVALID;
2432 break;
2433 case OCFS2_UNLOCK_DROP_LOCK:
2434 lockres->l_level = LKM_IVMODE;
2435 break;
2436 default:
2437 BUG();
2438 }
2440 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2441 complete_unlock:
2442 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2443 spin_unlock_irqrestore(&lockres->l_lock, flags);
2445 wake_up(&lockres->l_event);
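/*
 * Purely illustrative helper (an assumption, not in the original
 * file): naming the unlock actions that the ast above distinguishes
 * can make debug output easier to read than the raw integers.
 */
static inline const char *ocfs2_example_unlock_action_str(int action)
{
	switch (action) {
	case OCFS2_UNLOCK_INVALID:
		return "invalid";
	case OCFS2_UNLOCK_CANCEL_CONVERT:
		return "cancel convert";
	case OCFS2_UNLOCK_DROP_LOCK:
		return "drop lock";
	default:
		return "unknown";
	}
}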
2450 static int ocfs2_drop_lock(struct ocfs2_super *osb,
2451 struct ocfs2_lock_res *lockres)
2453 enum dlm_status status;
2454 unsigned long flags;
2455 int lkm_flags = 0;
2457 /* We didn't get anywhere near actually using this lockres. */
2458 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2459 goto out;
2461 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
2462 lkm_flags |= LKM_VALBLK;
2464 spin_lock_irqsave(&lockres->l_lock, flags);
2466 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2467 "lockres %s, flags 0x%lx\n",
2468 lockres->l_name, lockres->l_flags);
2470 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2471 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2472 "%u, unlock_action = %u\n",
2473 lockres->l_name, lockres->l_flags, lockres->l_action,
2474 lockres->l_unlock_action);
2476 spin_unlock_irqrestore(&lockres->l_lock, flags);
2478 /* XXX: Today we just wait on any busy
2479 * locks... Perhaps we need to cancel converts in the
2480 * future? */
2481 ocfs2_wait_on_busy_lock(lockres);
2483 spin_lock_irqsave(&lockres->l_lock, flags);
2486 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2487 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2488 lockres->l_level == LKM_EXMODE &&
2489 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2490 lockres->l_ops->set_lvb(lockres);
2493 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2494 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2495 lockres->l_name);
2496 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2497 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2499 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2500 spin_unlock_irqrestore(&lockres->l_lock, flags);
2501 goto out;
2502 }
2504 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2506 /* make sure we never get here while waiting for an ast to
2507 * change levels. */
2508 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2510 /* is this necessary? */
2511 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2512 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2513 spin_unlock_irqrestore(&lockres->l_lock, flags);
2515 mlog(0, "lock %s\n", lockres->l_name);
2517 status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
2518 ocfs2_unlock_ast, lockres);
2519 if (status != DLM_NORMAL) {
2520 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2521 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2522 dlm_print_one_lock(lockres->l_lksb.lockid);
2523 BUG();
2524 }
2525 mlog(0, "lock %s, successful return from dlmunlock\n",
2526 lockres->l_name);
2528 ocfs2_wait_on_busy_lock(lockres);
2534 /* Mark the lockres as being dropped. It will no longer be
2535 * queued if blocking, but we still may have to wait on it
2536 * being dequeued from the vote thread before we can consider
2537 * it safe to drop.
2538 *
2539 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2540 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2541 {
2542 int status;
2543 struct ocfs2_mask_waiter mw;
2544 unsigned long flags;
2546 ocfs2_init_mask_waiter(&mw);
2548 spin_lock_irqsave(&lockres->l_lock, flags);
2549 lockres->l_flags |= OCFS2_LOCK_FREEING;
2550 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2551 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2552 spin_unlock_irqrestore(&lockres->l_lock, flags);
2554 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2556 status = ocfs2_wait_for_mask(&mw);
2560 spin_lock_irqsave(&lockres->l_lock, flags);
2561 }
2562 spin_unlock_irqrestore(&lockres->l_lock, flags);
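/*
 * A sketch, not in the original: the mask-waiter pattern used above
 * generalizes to any flag. A hypothetical variant that waits for an
 * arbitrary flag (e.g. OCFS2_LOCK_BUSY) to clear would use the same
 * recheck loop under the lockres spinlock:
 */
static inline void ocfs2_example_wait_flag_clear(struct ocfs2_lock_res *lockres,
						 unsigned long flag)
{
	struct ocfs2_mask_waiter mw;
	unsigned long flags;

	ocfs2_init_mask_waiter(&mw);

	spin_lock_irqsave(&lockres->l_lock, flags);
	while (lockres->l_flags & flag) {
		/* goal of 0: wake once every bit in 'flag' is clear */
		lockres_add_mask_waiter(lockres, &mw, flag, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_for_mask(&mw);

		spin_lock_irqsave(&lockres->l_lock, flags);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}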
2565 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
2566 struct ocfs2_lock_res *lockres)
2567 {
2568 int ret;
2570 ocfs2_mark_lockres_freeing(lockres);
2571 ret = ocfs2_drop_lock(osb, lockres);
2576 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2578 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
2579 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
2582 int ocfs2_drop_inode_locks(struct inode *inode)
2583 {
2584 int status, err;
2588 /* No need to call ocfs2_mark_lockres_freeing here -
2589 * ocfs2_clear_inode has done it for us. */
2591 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2592 &OCFS2_I(inode)->ip_open_lockres);
2593 if (err < 0)
2594 mlog_errno(err);
2595 status = err;
2598 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2599 &OCFS2_I(inode)->ip_data_lockres);
2600 if (err < 0)
2601 mlog_errno(err);
2602 if (err < 0 && !status)
2603 status = err;
2605 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2606 &OCFS2_I(inode)->ip_meta_lockres);
2607 if (err < 0)
2608 mlog_errno(err);
2609 if (err < 0 && !status)
2610 status = err;
2612 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2613 &OCFS2_I(inode)->ip_rw_lockres);
2614 if (err < 0)
2615 mlog_errno(err);
2616 if (err < 0 && !status)
2617 status = err;
2618 return status;
2619 }
2623 static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2624 int new_level)
2625 {
2626 assert_spin_locked(&lockres->l_lock);
2628 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
2630 if (lockres->l_level <= new_level) {
2631 mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
2632 lockres->l_level, new_level);
2633 BUG();
2634 }
2636 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2637 lockres->l_name, new_level, lockres->l_blocking);
2639 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2640 lockres->l_requested = new_level;
2641 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2644 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2645 struct ocfs2_lock_res *lockres,
2646 int new_level,
2647 int lvb)
2648 {
2649 int ret, dlm_flags = LKM_CONVERT;
2650 enum dlm_status status;
2654 if (lvb)
2655 dlm_flags |= LKM_VALBLK;
2657 status = dlmlock(osb->dlm,
2658 new_level,
2659 &lockres->l_lksb,
2660 dlm_flags,
2661 lockres->l_name,
2662 OCFS2_LOCK_ID_MAX_LEN - 1,
2663 ocfs2_locking_ast,
2664 lockres,
2665 ocfs2_blocking_ast);
2666 if (status != DLM_NORMAL) {
2667 ocfs2_log_dlm_error("dlmlock", status, lockres);
2668 ret = -EINVAL;
2669 ocfs2_recover_from_dlm_error(lockres, 1);
2679 /* returns 1 when the caller should unlock and call dlmunlock */
2680 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2681 struct ocfs2_lock_res *lockres)
2683 assert_spin_locked(&lockres->l_lock);
2686 mlog(0, "lock %s\n", lockres->l_name);
2688 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2689 /* If we're already trying to cancel a lock conversion
2690 * then just drop the spinlock and allow the caller to
2691 * requeue this lock. */
2693 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2694 return 0;
2695 }
2697 /* were we in a convert when the bast fired? */
2698 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2699 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2700 /* set things up for the unlockast to know to just
2701 * clear out the ast_action and unset busy, etc. */
2702 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2704 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2705 "lock %s, invalid flags: 0x%lx\n",
2706 lockres->l_name, lockres->l_flags);
2707 return 1;
2708 }
2711 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2712 struct ocfs2_lock_res *lockres)
2713 {
2714 int ret;
2715 enum dlm_status status;
2718 mlog(0, "lock %s\n", lockres->l_name);
2721 status = dlmunlock(osb->dlm,
2722 &lockres->l_lksb,
2723 LKM_CANCEL,
2724 ocfs2_unlock_ast,
2725 lockres);
2726 if (status != DLM_NORMAL) {
2727 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2728 ret = -EINVAL;
2729 ocfs2_recover_from_dlm_error(lockres, 0);
2730 }
2732 mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
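/*
 * Illustrative only, assuming the contract described above and
 * mirrored by the caller in ocfs2_unblock_lock() below: the prepare
 * step runs under the lockres spinlock and returns 1 when a cancel
 * should actually be issued; the dlmunlock itself must be sent with
 * the spinlock dropped.
 */
static inline int ocfs2_example_try_cancel(struct ocfs2_super *osb,
					   struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = ocfs2_prepare_cancel_convert(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	if (ret)
		ret = ocfs2_cancel_convert(osb, lockres);
	return ret;
}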
2738 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
2739 struct ocfs2_lock_res *lockres,
2740 struct ocfs2_unblock_ctl *ctl)
2742 unsigned long flags;
2743 int blocking;
2744 int new_level;
2745 int ret = 0;
2746 int set_lvb = 0;
2750 spin_lock_irqsave(&lockres->l_lock, flags);
2752 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2754 recheck:
2755 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2756 ctl->requeue = 1;
2757 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2758 spin_unlock_irqrestore(&lockres->l_lock, flags);
2759 if (ret) {
2760 ret = ocfs2_cancel_convert(osb, lockres);
2761 if (ret < 0)
2762 mlog_errno(ret);
2763 }
2764 goto leave;
2765 }
2767 /* if we're blocking an exclusive and we have *any* holders,
2768 * then requeue. */
2769 if ((lockres->l_blocking == LKM_EXMODE)
2770 && (lockres->l_ex_holders || lockres->l_ro_holders))
2771 goto leave_requeue;
2773 /* If it's a PR we're blocking, then only
2774 * requeue if we've got any EX holders */
2775 if (lockres->l_blocking == LKM_PRMODE &&
2776 lockres->l_ex_holders)
2777 goto leave_requeue;
2779 /*
2780 * Can we get a lock in this state if the holder counts are
2781 * zero? The meta data unblock code used to check this.
2782 */
2783 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
2784 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
2785 goto leave_requeue;
2787 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2789 if (lockres->l_ops->check_downconvert
2790 && !lockres->l_ops->check_downconvert(lockres, new_level))
2791 goto leave_requeue;
2793 /* If we get here, then we know that there are no more
2794 * incompatible holders (and anyone asking for an incompatible
2795 * lock is blocked). We can now downconvert the lock */
2796 if (!lockres->l_ops->downconvert_worker)
2797 goto downconvert;
2799 /* Some lockres types want to do a bit of work before
2800 * downconverting a lock. Allow that here. The worker function
2801 * may sleep, so we save off a copy of what we're blocking as
2802 * it may change while we're not holding the spin lock. */
2803 blocking = lockres->l_blocking;
2804 spin_unlock_irqrestore(&lockres->l_lock, flags);
2806 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
2808 if (ctl->unblock_action == UNBLOCK_STOP_POST)
2809 goto leave;
2811 spin_lock_irqsave(&lockres->l_lock, flags);
2812 if (blocking != lockres->l_blocking) {
2813 /* If this changed underneath us, then we can't drop
2814 * it just yet, so start over. */
2815 goto recheck;
2816 }
2819 downconvert:
2820 ctl->requeue = 0;
2821 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2822 if (lockres->l_level == LKM_EXMODE)
2823 set_lvb = 1;
2825 /*
2826 * We only set the lvb if the lock has been fully
2827 * refreshed - otherwise we risk setting stale
2828 * data. If it hasn't been fully refreshed, there's no need
2829 * to actually clear out the lvb here as its value is still valid.
2830 */
2831 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2832 lockres->l_ops->set_lvb(lockres);
2833 }
2835 ocfs2_prepare_downconvert(lockres, new_level);
2836 spin_unlock_irqrestore(&lockres->l_lock, flags);
2837 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
2838 leave:
2839 return ret;
2842 leave_requeue:
2843 spin_unlock_irqrestore(&lockres->l_lock, flags);
2844 ctl->requeue = 1;
2845 return 0;
2846 }
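/*
 * Sketch under assumption, not in the original: the simplest possible
 * ->downconvert_worker, for a lock type with no cached state to flush,
 * lets ocfs2_unblock_lock() proceed straight to the downconvert.
 */
static inline int ocfs2_example_noop_worker(struct ocfs2_lock_res *lockres,
					    int blocking)
{
	/* nothing to write back or invalidate for this lock type */
	return UNBLOCK_CONTINUE;
}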
2850 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2851 int blocking)
2852 {
2853 struct inode *inode;
2854 struct address_space *mapping;
2856 inode = ocfs2_lock_res_inode(lockres);
2857 mapping = inode->i_mapping;
2859 /*
2860 * We need this before the filemap_fdatawrite() so that it can
2861 * transfer the dirty bit from the PTE to the
2862 * page. Unfortunately this means that even for EX->PR
2863 * downconverts, we'll lose our mappings and have to build
2864 * them up again later.
2865 */
2866 unmap_mapping_range(mapping, 0, 0, 0);
2868 if (filemap_fdatawrite(mapping)) {
2869 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
2870 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2871 }
2872 sync_mapping_buffers(mapping);
2873 if (blocking == LKM_EXMODE) {
2874 truncate_inode_pages(mapping, 0);
2875 } else {
2876 /* We only need to wait on the I/O if we're not also
2877 * truncating pages because truncate_inode_pages waits
2878 * for us above. We don't truncate pages if we're
2879 * blocking anything < EXMODE because we want to keep
2880 * them around in that case. */
2881 filemap_fdatawait(mapping);
2882 }
2884 return UNBLOCK_CONTINUE;
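/*
 * Summary sketch (an illustrative restatement of the branch above, not
 * original code): the page cache is dropped only when the blocking
 * request is LKM_EXMODE; a PR request only forces writeback so readers
 * on other nodes see current data.
 */
static inline int ocfs2_example_should_truncate(int blocking)
{
	return blocking == LKM_EXMODE;
}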
2887 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
2888 int new_level)
2889 {
2890 struct inode *inode = ocfs2_lock_res_inode(lockres);
2891 int checkpointed = ocfs2_inode_fully_checkpointed(inode);
2893 BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
2894 BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
2896 if (checkpointed)
2897 return 1;
2899 ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
2900 return 0;
2901 }
2903 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
2905 struct inode *inode = ocfs2_lock_res_inode(lockres);
2907 __ocfs2_stuff_meta_lvb(inode);
2911 * Does the final reference drop on our dentry lock. Right now this
2912 * happens in the vote thread, but we could choose to simplify the
2913 * dlmglue API and push these off to the ocfs2_wq in the future.
2915 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
2916 struct ocfs2_lock_res *lockres)
2918 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2919 ocfs2_dentry_lock_put(osb, dl);
2922 /*
2923 * d_delete() matching dentries before the lock downconvert.
2925 * At this point, any process waiting to destroy the
2926 * dentry_lock due to last ref count is stopped by the
2927 * OCFS2_LOCK_QUEUED flag.
2929 * We have two potential problems:
2931 * 1) If we do the last reference drop on our dentry_lock (via dput)
2932 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
2933 * the downconvert to finish. Instead we take an elevated
2934 * reference and push the drop until after we've completed our
2935 * unblock processing.
2937 * 2) There might be another process with a final reference,
2938 * waiting on us to finish processing. If this is the case, we
2939 * detect it and exit out - there are no more dentries anyway.
2940 */
2941 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
2942 int blocking)
2943 {
2944 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2945 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
2946 struct dentry *dentry;
2947 unsigned long flags;
2948 int extra_ref = 0;
2950 /*
2951 * This node is blocking another node from getting a read
2952 * lock. This happens when we've renamed within a
2953 * directory. We've forced the other nodes to d_delete(), but
2954 * we never actually dropped our lock because it's still
2955 * valid. The downconvert code will retain a PR for this node,
2956 * so there's no further work to do.
2957 */
2958 if (blocking == LKM_PRMODE)
2959 return UNBLOCK_CONTINUE;
2961 /*
2962 * Mark this inode as potentially orphaned. The code in
2963 * ocfs2_delete_inode() will figure out whether it actually
2964 * needs to be freed or not.
2965 */
2966 spin_lock(&oi->ip_lock);
2967 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
2968 spin_unlock(&oi->ip_lock);
2970 /*
2971 * Yuck. We need to make sure however that the check of
2972 * OCFS2_LOCK_FREEING and the extra reference are atomic with
2973 * respect to a reference decrement or the setting of that
2974 * flag.
2975 */
2976 spin_lock_irqsave(&lockres->l_lock, flags);
2977 spin_lock(&dentry_attach_lock);
2978 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
2979 && dl->dl_count) {
2980 dl->dl_count++;
2981 extra_ref = 1;
2982 }
2983 spin_unlock(&dentry_attach_lock);
2984 spin_unlock_irqrestore(&lockres->l_lock, flags);
2986 mlog(0, "extra_ref = %d\n", extra_ref);
2988 /*
2989 * We have a process waiting on us in ocfs2_dentry_iput(),
2990 * which means we can't have any more outstanding
2991 * aliases. There's no need to do any more work.
2992 */
2993 if (!extra_ref)
2994 return UNBLOCK_CONTINUE;
2996 spin_lock(&dentry_attach_lock);
2997 while (1) {
2998 dentry = ocfs2_find_local_alias(dl->dl_inode,
2999 dl->dl_parent_blkno, 1);
3000 if (!dentry)
3001 break;
3002 spin_unlock(&dentry_attach_lock);
3004 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3005 dentry->d_name.name);
3007 /*
3008 * The following dcache calls may do an
3009 * iput(). Normally we don't want that from the
3010 * downconverting thread, but in this case it's ok
3011 * because the requesting node already has an
3012 * exclusive lock on the inode, so it can't be queued
3013 * for a downconvert.
3014 */
3015 d_delete(dentry);
3016 dput(dentry);
3018 spin_lock(&dentry_attach_lock);
3019 }
3020 spin_unlock(&dentry_attach_lock);
3022 /*
3023 * If we are the last holder of this dentry lock, there is no
3024 * reason to downconvert so skip straight to the unlock.
3025 */
3026 if (dl->dl_count == 1)
3027 return UNBLOCK_STOP_POST;
3029 return UNBLOCK_CONTINUE_POST;
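/*
 * Illustrative pairing, an assumption generalizing the code above: a
 * worker that returns UNBLOCK_CONTINUE_POST or UNBLOCK_STOP_POST must
 * also provide a ->post_unlock callback. ocfs2_process_blocked_lock()
 * below invokes it only after the lockres spinlock has been dropped,
 * so deferred work (such as a final reference drop) is safe here.
 */
static inline void ocfs2_example_post_unlock(struct ocfs2_super *osb,
					     struct ocfs2_lock_res *lockres)
{
	/* deferred cleanup for the hypothetical lock type goes here */
}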
3032 void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3033 struct ocfs2_lock_res *lockres)
3035 int status;
3036 struct ocfs2_unblock_ctl ctl = {0, 0,};
3037 unsigned long flags;
3039 /* Our reference to the lockres in this function can be
3040 * considered valid until we remove the OCFS2_LOCK_QUEUED
3041 * flag. */
3046 BUG_ON(!lockres->l_ops);
3048 mlog(0, "lockres %s blocked.\n", lockres->l_name);
3050 /* Detect whether a lock has been marked as going away while
3051 * the vote thread was processing other things. A lock can
3052 * still be marked with OCFS2_LOCK_FREEING after this check,
3053 * but short circuiting here will still save us some
3054 * performance. */
3055 spin_lock_irqsave(&lockres->l_lock, flags);
3056 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3057 goto unqueue;
3058 spin_unlock_irqrestore(&lockres->l_lock, flags);
3060 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3064 spin_lock_irqsave(&lockres->l_lock, flags);
3065 unqueue:
3066 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3067 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3068 } else
3069 ocfs2_schedule_blocked_lock(osb, lockres);
3071 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
3072 ctl.requeue ? "yes" : "no");
3073 spin_unlock_irqrestore(&lockres->l_lock, flags);
3075 if (ctl.unblock_action != UNBLOCK_CONTINUE
3076 && lockres->l_ops->post_unlock)
3077 lockres->l_ops->post_unlock(osb, lockres);
3082 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3083 struct ocfs2_lock_res *lockres)
3087 assert_spin_locked(&lockres->l_lock);
3089 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3090 /* Do not schedule a lock for downconvert when it's on
3091 * the way to destruction - any nodes wanting access
3092 * to the resource will get it soon. */
3093 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3094 lockres->l_name, lockres->l_flags);
3095 return;
3096 }
3098 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3100 spin_lock(&osb->vote_task_lock);
3101 if (list_empty(&lockres->l_blocked_list)) {
3102 list_add_tail(&lockres->l_blocked_list,
3103 &osb->blocked_lock_list);
3104 osb->blocked_lock_count++;
3105 }
3106 spin_unlock(&osb->vote_task_lock);
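/*
 * A minimal sketch of the assumed consumer side, based only on the
 * fields used above: the vote thread drains blocked_lock_list under
 * vote_task_lock and hands each lockres to
 * ocfs2_process_blocked_lock() with the spinlock dropped.
 */
static inline void ocfs2_example_drain_blocked(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres;

	spin_lock(&osb->vote_task_lock);
	while (!list_empty(&osb->blocked_lock_list)) {
		lockres = list_entry(osb->blocked_lock_list.next,
				     struct ocfs2_lock_res, l_blocked_list);
		list_del_init(&lockres->l_blocked_list);
		osb->blocked_lock_count--;
		spin_unlock(&osb->vote_task_lock);

		ocfs2_process_blocked_lock(osb, lockres);

		spin_lock(&osb->vote_task_lock);
	}
	spin_unlock(&osb->vote_task_lock);
}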