/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * userdlm.c
 *
 * Code which implements the kernel side of a minimal userspace
 * interface to our DLM.
 *
 * Many of the functions here are pared down versions of dlmglue.c
 * functions.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/signal.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/crc32.h>

#include "cluster/nodemanager.h"
#include "cluster/heartbeat.h"
#include "cluster/tcp.h"

#include "dlmapi.h"

#include "userdlm.h"

#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
static inline int user_check_wait_flag(struct user_lock_res *lockres,
				       int flag)
{
	int ret;

	spin_lock(&lockres->l_lock);
	ret = lockres->l_flags & flag;
	spin_unlock(&lockres->l_lock);

	return ret;
}
static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !user_check_wait_flag(lockres, USER_LOCK_BUSY));
}

static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
}
/* I heart container_of... */
static inline struct dlm_ctxt *
dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
{
	struct dlmfs_inode_private *ip;

	ip = container_of(lockres,
			  struct dlmfs_inode_private,
			  ip_lockres);
	return ip->ip_dlm;
}

static struct inode *
user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
{
	struct dlmfs_inode_private *ip;

	ip = container_of(lockres,
			  struct dlmfs_inode_private,
			  ip_lockres);
	return &ip->ip_vfs_inode;
}
static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
{
	spin_lock(&lockres->l_lock);
	lockres->l_flags &= ~USER_LOCK_BUSY;
	spin_unlock(&lockres->l_lock);
}
#define user_log_dlm_error(_func, _stat, _lockres) do {		\
	mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on "	\
		"resource %s: %s\n", dlm_errname(_stat), _func,	\
		_lockres->l_name, dlm_errmsg(_stat));		\
} while (0)
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
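/* The mapping below follows the usual DLM compatibility matrix for
 * these three modes: NL is compatible with everything, PR is
 * compatible with PR and NL, and EX is compatible with NL only. So
 * the highest level we can hold alongside a blocked EX request is
 * NL, and alongside a blocked PR request is PR. */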
static inline int user_highest_compat_lock_level(int level)
{
	int new_level = LKM_EXMODE;

	if (level == LKM_EXMODE)
		new_level = LKM_NLMODE;
	else if (level == LKM_PRMODE)
		new_level = LKM_PRMODE;
	return new_level;
}
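/* The AST, BAST and unlock-AST handlers below are called back by the
 * DLM and never sleep: all lockres state is updated under the l_lock
 * spinlock, and anyone waiting on a state change is notified through
 * the l_event waitqueue. */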
static void user_ast(void *opaque)
{
	struct user_lock_res *lockres = opaque;
	struct dlm_lockstatus *lksb;

	mlog(0, "AST fired for lockres %s\n", lockres->l_name);

	spin_lock(&lockres->l_lock);

	lksb = &(lockres->l_lksb);
	if (lksb->status != DLM_NORMAL) {
		mlog(ML_ERROR, "lksb status value of %u on lockres %s\n",
		     lksb->status, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		return;
	}

	mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
			"Lockres %s, requested ivmode. flags 0x%x\n",
			lockres->l_name, lockres->l_flags);

	/* we're downconverting. */
	if (lockres->l_requested < lockres->l_level) {
		if (lockres->l_requested <=
		    user_highest_compat_lock_level(lockres->l_blocking)) {
			lockres->l_blocking = LKM_NLMODE;
			lockres->l_flags &= ~USER_LOCK_BLOCKED;
		}
	}

	lockres->l_level = lockres->l_requested;
	lockres->l_requested = LKM_IVMODE;
	lockres->l_flags |= USER_LOCK_ATTACHED;
	lockres->l_flags &= ~USER_LOCK_BUSY;

	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
{
	struct inode *inode;
	inode = user_dlm_inode_from_user_lockres(lockres);
	if (!igrab(inode))
		BUG();
}
static void user_dlm_unblock_lock(void *opaque);
static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
{
	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
		user_dlm_grab_inode_ref(lockres);

		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
			  lockres);

		queue_work(user_dlm_worker, &lockres->l_work);
		lockres->l_flags |= USER_LOCK_QUEUED;
	}
}
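/* Called with the lockres spinlock held. Only queue the lockres for
 * the unblock worker once no holders remain at a level that
 * conflicts with the blocked request. The inode reference taken in
 * __user_dlm_queue_lockres() keeps the dlmfs inode alive until the
 * worker drops it. */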
static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
{
	int queue = 0;

	if (!(lockres->l_flags & USER_LOCK_BLOCKED))
		return;

	switch (lockres->l_blocking) {
	case LKM_EXMODE:
		if (!lockres->l_ex_holders && !lockres->l_ro_holders)
			queue = 1;
		break;
	case LKM_PRMODE:
		if (!lockres->l_ex_holders)
			queue = 1;
		break;
	default:
		BUG();
	}

	if (queue)
		__user_dlm_queue_lockres(lockres);
}
static void user_bast(void *opaque, int level)
{
	struct user_lock_res *lockres = opaque;

	mlog(0, "Blocking AST fired for lockres %s. Blocking level %d\n",
	     lockres->l_name, level);

	spin_lock(&lockres->l_lock);
	lockres->l_flags |= USER_LOCK_BLOCKED;
	if (level > lockres->l_blocking)
		lockres->l_blocking = level;

	__user_dlm_queue_lockres(lockres);
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
static void user_unlock_ast(void *opaque, enum dlm_status status)
{
	struct user_lock_res *lockres = opaque;

	mlog(0, "UNLOCK AST called on lock %s\n", lockres->l_name);

	if (status != DLM_NORMAL)
		mlog(ML_ERROR, "Dlm returns status %d\n", status);

	spin_lock(&lockres->l_lock);
	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN)
		lockres->l_level = LKM_IVMODE;
	else {
		lockres->l_requested = LKM_IVMODE; /* cancel an
						    * upconvert
						    * request. */
		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
		/* we want the unblock thread to look at it again
		 * now. */
		__user_dlm_queue_lockres(lockres);
	}

	lockres->l_flags &= ~USER_LOCK_BUSY;
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
	struct inode *inode;
	inode = user_dlm_inode_from_user_lockres(lockres);
	iput(inode);
}
static void user_dlm_unblock_lock(void *opaque)
{
	int new_level, status;
	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);

	mlog(0, "processing lockres %s\n", lockres->l_name);

	spin_lock(&lockres->l_lock);

	mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
			"Lockres %s, flags 0x%x\n",
			lockres->l_name, lockres->l_flags);

	/* notice that we don't clear USER_LOCK_BLOCKED here. If it's
	 * set, we want user_ast to clear it. */
	lockres->l_flags &= ~USER_LOCK_QUEUED;

	/* It's valid to get here and no longer be blocked - if we get
	 * several basts in a row, we might be queued by the first
	 * one, the unblock thread might run and clear the queued
	 * flag, and finally we might get another bast which re-queues
	 * us before our ast for the downconvert is called. */
	if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
		mlog(0, "Lockres %s, flags 0x%x: queued but not blocking\n",
		     lockres->l_name, lockres->l_flags);
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
		mlog(0, "lock is in teardown so we do nothing\n");
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_BUSY) {
		mlog(0, "BUSY flag detected...\n");
		if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
			spin_unlock(&lockres->l_lock);
			goto drop_ref;
		}

		lockres->l_flags |= USER_LOCK_IN_CANCEL;
		spin_unlock(&lockres->l_lock);

		status = dlmunlock(dlm,
				   &lockres->l_lksb,
				   LKM_CANCEL,
				   user_unlock_ast,
				   lockres);
		if (status == DLM_CANCELGRANT) {
			/* If we got this, then the ast was fired
			 * before we could cancel. We cleanup our
			 * state, and restart the function. */
			spin_lock(&lockres->l_lock);
			lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
			spin_unlock(&lockres->l_lock);
		} else if (status != DLM_NORMAL)
			user_log_dlm_error("dlmunlock", status, lockres);
		goto drop_ref;
	}

	/* If there are still incompat holders, we can exit safely
	 * without worrying about re-queueing this lock as that will
	 * happen on the last call to user_cluster_unlock. */
	if ((lockres->l_blocking == LKM_EXMODE)
	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
		     lockres->l_ro_holders, lockres->l_ex_holders);
		goto drop_ref;
	}

	if ((lockres->l_blocking == LKM_PRMODE)
	    && lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "can't downconvert for pr: ex = %u\n",
		     lockres->l_ex_holders);
		goto drop_ref;
	}

	/* yay, we can downconvert now. */
	new_level = user_highest_compat_lock_level(lockres->l_blocking);
	lockres->l_requested = new_level;
	lockres->l_flags |= USER_LOCK_BUSY;
	mlog(0, "Downconvert lock from %d to %d\n",
	     lockres->l_level, new_level);
	spin_unlock(&lockres->l_lock);

	/* need lock downconvert request now... */
	status = dlmlock(dlm,
			 new_level,
			 &lockres->l_lksb,
			 LKM_CONVERT|LKM_VALBLK,
			 lockres->l_name,
			 user_ast,
			 lockres,
			 user_bast);
	if (status != DLM_NORMAL) {
		user_log_dlm_error("dlmlock", status, lockres);
		user_recover_from_dlm_error(lockres);
	}

drop_ref:
	user_dlm_drop_inode_ref(lockres);
}
static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
					int level)
{
	switch(level) {
	case LKM_EXMODE:
		lockres->l_ex_holders++;
		break;
	case LKM_PRMODE:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int
user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
				  int wanted)
{
	BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));

	return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
}
int user_dlm_cluster_lock(struct user_lock_res *lockres,
			  int level,
			  int lkm_flags)
{
	int status, local_flags;
	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);

	if (level != LKM_EXMODE &&
	    level != LKM_PRMODE) {
		mlog(ML_ERROR, "lockres %s: invalid request!\n",
		     lockres->l_name);
		status = -EINVAL;
		goto bail;
	}

	mlog(0, "lockres %s: asking for %s lock, passed flags = 0x%x\n",
	     lockres->l_name,
	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
	     lkm_flags);

again:
	if (signal_pending(current)) {
		status = -ERESTARTSYS;
		goto bail;
	}

	spin_lock(&lockres->l_lock);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if ((lockres->l_flags & USER_LOCK_BUSY) &&
	    (level > lockres->l_level)) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		spin_unlock(&lockres->l_lock);

		user_wait_on_busy_lock(lockres);
		goto again;
	}

	if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
	    (!user_may_continue_on_blocked_lock(lockres, level))) {
		/* is the lock currently blocked on behalf of
		 * another node? */
		spin_unlock(&lockres->l_lock);

		user_wait_on_blocked_lock(lockres);
		goto again;
	}

	if (level > lockres->l_level) {
		local_flags = lkm_flags | LKM_VALBLK;
		if (lockres->l_level != LKM_IVMODE)
			local_flags |= LKM_CONVERT;

		lockres->l_requested = level;
		lockres->l_flags |= USER_LOCK_BUSY;
		spin_unlock(&lockres->l_lock);

		BUG_ON(level == LKM_IVMODE);
		BUG_ON(level == LKM_NLMODE);

		mlog(0, "lock %s, get lock from %d to level = %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		status = dlmlock(dlm,
				 level,
				 &lockres->l_lksb,
				 local_flags,
				 lockres->l_name,
				 user_ast,
				 lockres,
				 user_bast);
		if (status != DLM_NORMAL) {
			if ((lkm_flags & LKM_NOQUEUE) &&
			    (status == DLM_NOTQUEUED))
				status = -EAGAIN;
			else {
				user_log_dlm_error("dlmlock", status, lockres);
				status = -EINVAL;
			}
			user_recover_from_dlm_error(lockres);
			goto bail;
		}

		mlog(0, "lock %s, successful return from dlmlock\n",
		     lockres->l_name);

		user_wait_on_busy_lock(lockres);
		goto again;
	}

	user_dlm_inc_holders(lockres, level);
	spin_unlock(&lockres->l_lock);

	mlog(0, "lockres %s: Got %s lock!\n", lockres->l_name,
	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");

	status = 0;
bail:
	return status;
}
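/*
 * A minimal usage sketch (illustrative, not part of the original
 * file): callers such as dlmfs pair user_dlm_cluster_lock() with a
 * user_dlm_cluster_unlock() at the same level, e.g.:
 *
 *	status = user_dlm_cluster_lock(lockres, LKM_PRMODE, 0);
 *	if (status < 0)
 *		return status;
 *	... read state shared across the cluster ...
 *	user_dlm_cluster_unlock(lockres, LKM_PRMODE);
 */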
static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
					int level)
{
	switch(level) {
	case LKM_EXMODE:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case LKM_PRMODE:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
}
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
			     int level)
{
	if (level != LKM_EXMODE &&
	    level != LKM_PRMODE) {
		mlog(ML_ERROR, "lockres %s: invalid request!\n",
		     lockres->l_name);
		return;
	}

	mlog(0, "lockres %s: dropping %s lock\n", lockres->l_name,
	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");

	spin_lock(&lockres->l_lock);
	user_dlm_dec_holders(lockres, level);
	__user_dlm_cond_queue_lockres(lockres);
	spin_unlock(&lockres->l_lock);
}
void user_dlm_write_lvb(struct inode *inode,
			const char *val,
			unsigned int len)
{
	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	char *lvb = lockres->l_lksb.lvb;

	BUG_ON(len > DLM_LVB_LEN);

	spin_lock(&lockres->l_lock);

	BUG_ON(lockres->l_level < LKM_EXMODE);
	memcpy(lvb, val, len);

	spin_unlock(&lockres->l_lock);
}
void user_dlm_read_lvb(struct inode *inode,
		       char *val,
		       unsigned int len)
{
	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	char *lvb = lockres->l_lksb.lvb;

	BUG_ON(len > DLM_LVB_LEN);

	spin_lock(&lockres->l_lock);

	BUG_ON(lockres->l_level < LKM_PRMODE);
	memcpy(val, lvb, len);

	spin_unlock(&lockres->l_lock);
}
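/* The lock value block (LVB) gives userspace DLM_LVB_LEN bytes of
 * data carried with the lock: writes require the lock at LKM_EXMODE
 * (enforced by the BUG_ON above) and propagate to other nodes via
 * the LKM_VALBLK flag passed on the downconvert, while reads are
 * valid at LKM_PRMODE or better. */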
void user_dlm_lock_res_init(struct user_lock_res *lockres,
			    struct dentry *dentry)
{
	memset(lockres, 0, sizeof(*lockres));

	spin_lock_init(&lockres->l_lock);
	init_waitqueue_head(&lockres->l_event);
	lockres->l_level = LKM_IVMODE;
	lockres->l_requested = LKM_IVMODE;
	lockres->l_blocking = LKM_IVMODE;

	/* should have been checked before getting here. */
	BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);

	memcpy(lockres->l_name,
	       dentry->d_name.name,
	       dentry->d_name.len);
}
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
	int status = -EBUSY;
	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);

	mlog(0, "asked to destroy %s\n", lockres->l_name);

	spin_lock(&lockres->l_lock);
	while (lockres->l_flags & USER_LOCK_BUSY) {
		spin_unlock(&lockres->l_lock);

		mlog(0, "lock %s is busy\n", lockres->l_name);

		user_wait_on_busy_lock(lockres);

		spin_lock(&lockres->l_lock);
	}

	if (lockres->l_ro_holders || lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "lock %s has holders\n", lockres->l_name);
		goto bail;
	}

	status = 0;
	if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "lock %s is not attached\n", lockres->l_name);
		goto bail;
	}

	lockres->l_flags &= ~USER_LOCK_ATTACHED;
	lockres->l_flags |= USER_LOCK_BUSY;
	lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
	spin_unlock(&lockres->l_lock);

	mlog(0, "unlocking lockres %s\n", lockres->l_name);
	status = dlmunlock(dlm,
			   &lockres->l_lksb,
			   LKM_VALBLK,
			   user_unlock_ast,
			   lockres);
	if (status != DLM_NORMAL) {
		user_log_dlm_error("dlmunlock", status, lockres);
		status = -EINVAL;
		goto bail;
	}

	user_wait_on_busy_lock(lockres);

	status = 0;
bail:
	return status;
}
struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
{
	struct dlm_ctxt *dlm;
	u32 dlm_key;
	char *domain;

	domain = kmalloc(name->len + 1, GFP_KERNEL);
	if (!domain) {
		mlog_errno(-ENOMEM);
		return ERR_PTR(-ENOMEM);
	}

	dlm_key = crc32_le(0, name->name, name->len);

	snprintf(domain, name->len + 1, "%.*s", name->len, name->name);

	dlm = dlm_register_domain(domain, dlm_key);
	if (IS_ERR(dlm))
		mlog_errno(PTR_ERR(dlm));

	kfree(domain);
	return dlm;
}
void user_dlm_unregister_context(struct dlm_ctxt *dlm)
{
	dlm_unregister_domain(dlm);
}
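/*
 * Lifecycle sketch (illustrative only; the dentry and lockres here
 * stand in for whatever the caller manages): a DLM domain is
 * registered once per dlmfs directory, lock resources are
 * initialized against dentries created within it, and teardown
 * happens in reverse order.
 *
 *	dlm = user_dlm_register_context(&dentry->d_name);
 *	if (IS_ERR(dlm))
 *		return PTR_ERR(dlm);
 *	user_dlm_lock_res_init(lockres, dentry);
 *	...
 *	status = user_dlm_destroy_lock(lockres);
 *	user_dlm_unregister_context(dlm);
 */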