/* fs/gfs2/lops.c - from linux-2.6-omap-h63xx.git, commit "[GFS2] Only do lo_incore_commit once" */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to its in-place disk block, remove it from the AIL.
         */
        if (bd->bd_ail)
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        get_bh(bh);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL to which the unpinned buffer will be attached
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

        if (!buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        gfs2_log_lock(sdp);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}
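
/*
 * Note on the pin/unpin lifecycle above: gfs2_pin() takes an extra
 * reference on the buffer (get_bh) which is held while the block sits
 * in the log; gfs2_unpin() either hands that reference over to the AIL
 * (the first time the buffer is unpinned) or drops it again with
 * brelse() when the buffer was already on an AIL list from an earlier
 * transaction.
 */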


static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
{
        return (struct gfs2_log_descriptor *)bh->b_data;
}

static inline __be64 *bh_log_ptr(struct buffer_head *bh)
{
        struct gfs2_log_descriptor *ld = bh_log_desc(bh);
        return (__force __be64 *)(ld + 1);
}

static inline __be64 *bh_ptr_end(struct buffer_head *bh)
{
        return (__force __be64 *)(bh->b_data + bh->b_size);
}


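/*
 * Layout of a log descriptor block, as built below and addressed by the
 * bh_log_ptr()/bh_ptr_end() helpers above: a struct gfs2_log_descriptor
 * at the start of the block, followed by an array of __be64 entries
 * running to the end of the block (block numbers for metadata, or
 * block-number/escape-flag pairs for journaled data).
 */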
static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
{
        struct buffer_head *bh = gfs2_log_get_buf(sdp);
        struct gfs2_log_descriptor *ld = bh_log_desc(bh);
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = 0;
        ld->ld_data1 = 0;
        ld->ld_data2 = 0;
        memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
        return bh;
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr;

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (!list_empty(&bd->bd_list_tr))
                goto out;
        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_buf++;
        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
        if (!list_empty(&le->le_list))
                goto out;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        gfs2_meta_check(sdp, bd->bd_bh);
        gfs2_pin(sdp, bd->bd_bh);
        sdp->sd_log_num_buf++;
        list_add(&le->le_list, &sdp->sd_log_le_buf);
        tr->tr_num_buf_new++;
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

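/*
 * buf_lo_before_commit() below makes two passes over sd_log_le_buf for
 * each descriptor's worth of buffers: the first pass fills the
 * descriptor with the real (in-place) block numbers and submits it, the
 * second pass submits a "fake" buffer_head per metadata block so that
 * the journal copy is written without disturbing the pinned in-place
 * buffer.
 */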
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct buffer_head *bh;
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        unsigned int total;
        unsigned int limit;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        limit = buf_limit(sdp);
        /* for 4k blocks, limit = 503 */

        gfs2_log_lock(sdp);
        total = sdp->sd_log_num_buf;
        bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
        while(total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
                gfs2_log_lock(sdp);
                ld = bh_log_desc(bh);
                ptr = bh_log_ptr(bh);
                ld->ld_length = cpu_to_be32(num + 1);
                ld->ld_data1 = cpu_to_be32(num);

                n = 0;
                list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                submit_bh(WRITE, bh);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);
                        bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
                        submit_bh(WRITE, bh);
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

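/*
 * Journal replay for metadata: for each block number recorded in a
 * METADATA descriptor, read the copy from the journal and, unless it
 * has been revoked (gfs2_revoke_check()), copy it over the in-place
 * block via a buffer obtained with gfs2_meta_new() and marked dirty.
 */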
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_trans *tr;

        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_revoke++;
        sdp->sd_log_num_revoke++;
        list_add(&le->le_list, &sdp->sd_log_le_revoke);
}

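/*
 * Revokes are packed as a flat array of __be64 block numbers: the first
 * block of the run is a REVOKE log descriptor, and any continuation
 * blocks carry only a GFS2_METATYPE_LB meta header, with the tags
 * starting immediately after the header in each case.
 */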
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        struct buffer_head *bh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;

        if (!sdp->sd_log_num_revoke)
                return;

        bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
        ld = bh_log_desc(bh);
        ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
                                                    sizeof(u64)));
        ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
        offset = sizeof(struct gfs2_log_descriptor);

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        submit_bh(WRITE, bh);

                        bh = gfs2_log_get_buf(sdp);
                        mh = (struct gfs2_meta_header *)bh->b_data;
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
                kmem_cache_free(gfs2_bufdata_cachep, bd);

                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        submit_bh(WRITE, bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0)
                                return error;
                        else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_rgrpd *rgd;
        struct gfs2_trans *tr = current->journal_info;

        tr->tr_touched = 1;

        rgd = container_of(le, struct gfs2_rgrpd, rd_le);

        gfs2_log_lock(sdp);
        if (!list_empty(&le->le_list)) {
                gfs2_log_unlock(sdp);
                return;
        }
        gfs2_rgrp_bh_hold(rgd);
        sdp->sd_log_num_rg++;
        list_add(&le->le_list, &sdp->sd_log_le_rg);
        gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_rg;
        struct gfs2_rgrpd *rgd;

        while (!list_empty(head)) {
                rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
                list_del_init(&rgd->rd_le.le_list);
                sdp->sd_log_num_rg--;

                gfs2_rgrp_repolish_clones(rgd);
                gfs2_rgrp_bh_put(rgd);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time.
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as the metadata
 *    in the functions above. The difference is that here the tag is two
 *    __be64s: the block number (as for metadata) and a flag saying
 *    whether the data block needs escaping or not. This means we need a
 *    new log descriptor for every 251 or so data blocks, which isn't an
 *    enormous overhead, but it is twice as much as for normal metadata
 *    blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr = current->journal_info;
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (tr) {
                if (!list_empty(&bd->bd_list_tr))
                        goto out;
                tr->tr_touched = 1;
                if (gfs2_is_jdata(ip)) {
                        tr->tr_num_buf++;
                        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
                }
        }
        if (!list_empty(&le->le_list))
                goto out;

        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        if (gfs2_is_jdata(ip)) {
                gfs2_pin(sdp, bd->bd_bh);
                tr->tr_num_databuf_new++;
                sdp->sd_log_num_databuf++;
                list_add(&le->le_list, &sdp->sd_log_le_databuf);
        } else {
                list_add(&le->le_list, &sdp->sd_log_le_ordered);
        }
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

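/*
 * Escaping: a journaled data block whose first four bytes happen to
 * equal GFS2_MAGIC could be mistaken for a metadata/log header while
 * the journal is scanned. gfs2_check_magic() below flags such buffers;
 * when they are written to the log, the magic word in the log copy is
 * zeroed (see gfs2_write_blocks()) and the escape flag in the
 * descriptor is set, so that databuf_lo_scan_elements() can restore the
 * magic on replay.
 */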
static void gfs2_check_magic(struct buffer_head *bh)
{
        void *kaddr;
        __be32 *ptr;

        clear_buffer_escaped(bh);
        kaddr = kmap_atomic(bh->b_page, KM_USER0);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_atomic(kaddr, KM_USER0);
}

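/*
 * gfs2_write_blocks() submits one filled JDATA descriptor plus the data
 * blocks it describes. Each entry in the descriptor is a pair of
 * __be64s (block number, escape flag), hence the "ptr += 2" stride;
 * escaped buffers are copied into a fresh log buffer with the magic
 * word zeroed, everything else goes out via gfs2_log_fake_buf().
 */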
static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
                              struct list_head *list, struct list_head *done,
                              unsigned int n)
{
        struct buffer_head *bh1;
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd;
        __be64 *ptr;

        if (!bh)
                return;

        ld = bh_log_desc(bh);
        ld->ld_length = cpu_to_be32(n + 1);
        ld->ld_data1 = cpu_to_be32(n);

        ptr = bh_log_ptr(bh);

        get_bh(bh);
        submit_bh(WRITE, bh);
        gfs2_log_lock(sdp);
        while(!list_empty(list)) {
                bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
                list_move_tail(&bd->bd_le.le_list, done);
                get_bh(bd->bd_bh);
                while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
                        gfs2_log_incr_head(sdp);
                        ptr += 2;
                }
                gfs2_log_unlock(sdp);
                lock_buffer(bd->bd_bh);
                if (buffer_escaped(bd->bd_bh)) {
                        void *kaddr;
                        bh1 = gfs2_log_get_buf(sdp);
                        kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
                        memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
                               bh1->b_size);
                        kunmap_atomic(kaddr, KM_USER0);
                        *(__be32 *)bh1->b_data = 0;
                        clear_buffer_escaped(bd->bd_bh);
                        unlock_buffer(bd->bd_bh);
                        brelse(bd->bd_bh);
                } else {
                        bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
                }
                submit_bh(WRITE, bh1);
                gfs2_log_lock(sdp);
                ptr += 2;
        }
        gfs2_log_unlock(sdp);
        brelse(bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_bufdata *bd = NULL;
        struct buffer_head *bh = NULL;
        unsigned int n = 0;
        __be64 *ptr = NULL, *end = NULL;
        LIST_HEAD(processed);
        LIST_HEAD(in_progress);

        gfs2_log_lock(sdp);
        while (!list_empty(&sdp->sd_log_le_databuf)) {
                if (ptr == end) {
                        gfs2_log_unlock(sdp);
                        gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
                        n = 0;
                        bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
                        ptr = bh_log_ptr(bh);
                        end = bh_ptr_end(bh) - 1;
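                        /* Each databuf entry is two __be64s (block
                         * number + escape flag); "end" is pulled back
                         * one word so the descriptor is flushed before
                         * a partial pair could overrun the block. With
                         * 4k blocks that is roughly 251 entries per
                         * JDATA descriptor (cf. the databuf_lo_add()
                         * comment above). */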
                        gfs2_log_lock(sdp);
                        continue;
                }
                bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
                list_move_tail(&bd->bd_le.le_list, &in_progress);
                gfs2_check_magic(bd->bd_bh);
                *ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
                /* escape flag refers to the data buffer, not the log descriptor */
                *ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
                n++;
        }
        gfs2_log_unlock(sdp);
        gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
        gfs2_log_lock(sdp);
        list_splice(&processed, &sdp->sd_log_le_databuf);
        gfs2_log_unlock(sdp);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);
                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_databuf--;
                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}


const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_add = buf_lo_add,
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_add = revoke_lo_add,
        .lo_before_commit = revoke_lo_before_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
        .lo_add = rg_lo_add,
        .lo_after_commit = rg_lo_after_commit,
        .lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_add = databuf_lo_add,
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_rg_lops,
        &gfs2_revoke_lops,
        NULL,
};