fs/gfs2/locking/dlm/thread.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include "lock_dlm.h"

/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */

static void queue_submit(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;

        spin_lock(&ls->async_lock);
        list_add_tail(&lp->delay_list, &ls->submit);
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);
}

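/* Pass a blocking callback (bast) from dlm up to GFS as the matching
   LM_CB_NEED_* request. */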
static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
        struct gdlm_ls *ls = lp->ls;
        unsigned int cb = 0;

        switch (gdlm_make_lmstate(bast_mode)) {
        case LM_ST_EXCLUSIVE:
                cb = LM_CB_NEED_E;
                break;
        case LM_ST_DEFERRED:
                cb = LM_CB_NEED_D;
                break;
        case LM_ST_SHARED:
                cb = LM_CB_NEED_S;
                break;
        default:
                /* lp->bast_mode was already cleared by the caller, so log
                   the bast_mode argument instead */
                gdlm_assert(0, "unknown bast mode %u", bast_mode);
        }

        ls->fscb(ls->sdp, cb, &lp->lockname);
}

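/* Clear LFL_AST_WAIT and wake up anyone sleeping on that bit. */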
static void wake_up_ast(struct gdlm_lock *lp)
{
        clear_bit(LFL_AST_WAIT, &lp->flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&lp->flags, LFL_AST_WAIT);
}

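/* Handle a completion callback (ast) from dlm for a lock request and decide
   what, if anything, to report back to GFS. */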
static void process_complete(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;
        struct lm_async_cb acb;
        s16 prev_mode = lp->cur;

        memset(&acb, 0, sizeof(acb));

        if (lp->lksb.sb_status == -DLM_ECANCEL) {
                log_info("complete dlm cancel %x,%llx flags %lx",
                         lp->lockname.ln_type,
                         (unsigned long long)lp->lockname.ln_number,
                         lp->flags);

                lp->req = lp->cur;
                acb.lc_ret |= LM_OUT_CANCELED;
                if (lp->cur == DLM_LOCK_IV)
                        lp->lksb.sb_lkid = 0;
                goto out;
        }

        if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
                if (lp->lksb.sb_status != -DLM_EUNLOCK) {
                        log_info("unlock sb_status %d %x,%llx flags %lx",
                                 lp->lksb.sb_status, lp->lockname.ln_type,
                                 (unsigned long long)lp->lockname.ln_number,
                                 lp->flags);
                        return;
                }

                lp->cur = DLM_LOCK_IV;
                lp->req = DLM_LOCK_IV;
                lp->lksb.sb_lkid = 0;

                if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
                        gdlm_delete_lp(lp);
                        return;
                }
                goto out;
        }

        if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
                memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

        if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
                if (lp->req == DLM_LOCK_PR)
                        lp->req = DLM_LOCK_CW;
                else if (lp->req == DLM_LOCK_CW)
                        lp->req = DLM_LOCK_PR;
        }

        /*
         * A canceled lock request.  The lock was just taken off the delayed
         * list and was never even submitted to dlm.
         */

        if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
                log_info("complete internal cancel %x,%llx",
                         lp->lockname.ln_type,
                         (unsigned long long)lp->lockname.ln_number);
                lp->req = lp->cur;
                acb.lc_ret |= LM_OUT_CANCELED;
                goto out;
        }

        /*
         * An error occurred.
         */
        if (lp->lksb.sb_status) {
                /* a "normal" error */
                if ((lp->lksb.sb_status == -EAGAIN) &&
                    (lp->lkf & DLM_LKF_NOQUEUE)) {
                        lp->req = lp->cur;
                        if (lp->cur == DLM_LOCK_IV)
                                lp->lksb.sb_lkid = 0;
                        goto out;
                }

                /* this could only happen with cancels I think */
                log_info("ast sb_status %d %x,%llx flags %lx",
                         lp->lksb.sb_status, lp->lockname.ln_type,
                         (unsigned long long)lp->lockname.ln_number,
                         lp->flags);
                return;
        }

        /*
         * This is an AST for an EX->EX conversion for sync_lvb from GFS.
         */

        if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
                wake_up_ast(lp);
                return;
        }

        /*
         * A lock has been demoted to NL because it initially completed during
         * BLOCK_LOCKS.  Now it must be requested in the originally requested
         * mode.
         */

        if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
                gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
                            lp->lockname.ln_type,
                            (unsigned long long)lp->lockname.ln_number);
                gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
                            lp->lockname.ln_type,
                            (unsigned long long)lp->lockname.ln_number);

                lp->cur = DLM_LOCK_NL;
                lp->req = lp->prev_req;
                lp->prev_req = DLM_LOCK_IV;
                lp->lkf &= ~DLM_LKF_CONVDEADLK;

                set_bit(LFL_NOCACHE, &lp->flags);

                if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
                    !test_bit(LFL_NOBLOCK, &lp->flags))
                        gdlm_queue_delayed(lp);
                else
                        queue_submit(lp);
                return;
        }

        /*
         * A request is granted during dlm recovery.  It may be granted
         * because the locks of a failed node were cleared.  In that case,
         * there may be inconsistent data beneath this lock and we must wait
         * for recovery to complete to use it.  When gfs recovery is done this
         * granted lock will be converted to NL and then reacquired in this
         * granted state.
         */

        if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
            !test_bit(LFL_NOBLOCK, &lp->flags) &&
            lp->req != DLM_LOCK_NL) {

                lp->cur = lp->req;
                lp->prev_req = lp->req;
                lp->req = DLM_LOCK_NL;
                lp->lkf |= DLM_LKF_CONVERT;
                lp->lkf &= ~DLM_LKF_CONVDEADLK;

                log_debug("rereq %x,%llx id %x %d,%d",
                          lp->lockname.ln_type,
                          (unsigned long long)lp->lockname.ln_number,
                          lp->lksb.sb_lkid, lp->cur, lp->req);

                set_bit(LFL_REREQUEST, &lp->flags);
                queue_submit(lp);
                return;
        }

        /*
         * DLM demoted the lock to NL before it was granted so GFS must be
         * told it cannot cache data for this lock.
         */

        if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
                set_bit(LFL_NOCACHE, &lp->flags);

out:
        /*
         * This is an internal lock_dlm lock
         */

        if (test_bit(LFL_INLOCK, &lp->flags)) {
                clear_bit(LFL_NOBLOCK, &lp->flags);
                lp->cur = lp->req;
                wake_up_ast(lp);
                return;
        }

        /*
         * Normal completion of a lock request.  Tell GFS it now has the lock.
         */

        clear_bit(LFL_NOBLOCK, &lp->flags);
        lp->cur = lp->req;

        acb.lc_name = lp->lockname;
        acb.lc_ret |= gdlm_make_lmstate(lp->cur);

        if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
            (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
                acb.lc_ret |= LM_OUT_CACHEABLE;

        ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}

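/* Return true if there is nothing for this thread to process; only the
   thread that handles blocking callbacks also checks the blocking list. */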
static inline int no_work(struct gdlm_ls *ls, int blocking)
{
        int ret;

        spin_lock(&ls->async_lock);
        ret = list_empty(&ls->complete) && list_empty(&ls->submit);
        if (ret && blocking)
                ret = list_empty(&ls->blocking);
        spin_unlock(&ls->async_lock);

        return ret;
}

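/* Once per drop_locks_period, return 1 so the caller asks GFS to drop cached
   locks if the lockspace holds at least drop_locks_count locks. */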
static inline int check_drop(struct gdlm_ls *ls)
{
        if (!ls->drop_locks_count)
                return 0;

        if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
                ls->drop_time = jiffies;
                if (ls->all_locks_count >= ls->drop_locks_count)
                        return 1;
        }
        return 0;
}

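/* Main loop shared by both lock_dlm threads: sleep until work is queued,
   then handle one blocking callback, completion or submission per pass. */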
static int gdlm_thread(void *data, int blist)
{
        struct gdlm_ls *ls = (struct gdlm_ls *) data;
        struct gdlm_lock *lp = NULL;
        uint8_t complete, blocking, submit, drop;
        DECLARE_WAITQUEUE(wait, current);

        /* Only thread1 is allowed to do blocking callbacks since gfs
           may wait for a completion callback within a blocking cb. */

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&ls->thread_wait, &wait);
                if (no_work(ls, blist))
                        schedule();
                remove_wait_queue(&ls->thread_wait, &wait);
                set_current_state(TASK_RUNNING);

                complete = blocking = submit = drop = 0;

                spin_lock(&ls->async_lock);

                if (blist && !list_empty(&ls->blocking)) {
                        lp = list_entry(ls->blocking.next, struct gdlm_lock,
                                        blist);
                        list_del_init(&lp->blist);
                        blocking = lp->bast_mode;
                        lp->bast_mode = 0;
                } else if (!list_empty(&ls->complete)) {
                        lp = list_entry(ls->complete.next, struct gdlm_lock,
                                        clist);
                        list_del_init(&lp->clist);
                        complete = 1;
                } else if (!list_empty(&ls->submit)) {
                        lp = list_entry(ls->submit.next, struct gdlm_lock,
                                        delay_list);
                        list_del_init(&lp->delay_list);
                        submit = 1;
                }

                drop = check_drop(ls);
                spin_unlock(&ls->async_lock);

                if (complete)
                        process_complete(lp);

                else if (blocking)
                        process_blocking(lp, blocking);

                else if (submit)
                        gdlm_do_lock(lp);

                if (drop)
                        ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);

                schedule();
        }

        return 0;
}

static int gdlm_thread1(void *data)
{
        return gdlm_thread(data, 1);
}

static int gdlm_thread2(void *data)
{
        return gdlm_thread(data, 0);
}

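/* Start the two lock_dlm kernel threads; thread1 additionally handles
   blocking callbacks. */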
int gdlm_init_threads(struct gdlm_ls *ls)
{
        struct task_struct *p;
        int error;

        p = kthread_run(gdlm_thread1, ls, "lock_dlm1");
        if (IS_ERR(p)) {
                error = PTR_ERR(p);
                log_error("can't start lock_dlm1 thread %d", error);
                return error;
        }
        ls->thread1 = p;

        p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
        if (IS_ERR(p)) {
                error = PTR_ERR(p);
                log_error("can't start lock_dlm2 thread %d", error);
                kthread_stop(ls->thread1);
                return error;
        }
        ls->thread2 = p;

        return 0;
}

void gdlm_release_threads(struct gdlm_ls *ls)
{
        kthread_stop(ls->thread1);
        kthread_stop(ls->thread2);
}