[linux-2.6-omap-h63xx.git] / fs / reiserfs / journal.c
index 49d1a53dbef0e80f9dc2bea5ec61e415a8b53a5d..85ce23268302931e0a23a6bd6cc2a20a36581e56 100644 (file)
@@ -34,7 +34,6 @@
 **                     from within kupdate, it will ignore the immediate flag
 */
 
-#include <linux/config.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
@@ -54,6 +53,7 @@
 #include <linux/workqueue.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/backing-dev.h>
 
 /* gets a struct reiserfs_journal_list * from a list head */
 #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
@@ -719,8 +719,7 @@ static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t * lock, void (fn) (struct buffer_chunk *))
 {
        int ret = 0;
-       if (chunk->nr >= CHUNK_SIZE)
-               BUG();
+       BUG_ON(chunk->nr >= CHUNK_SIZE);
        chunk->bh[chunk->nr++] = bh;
        if (chunk->nr >= CHUNK_SIZE) {
                ret = 1;
@@ -789,8 +788,7 @@ static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                /* buffer must be locked for __add_jh, should be able to have
                 * two adds at the same time
                 */
-               if (bh->b_private)
-                       BUG();
+               BUG_ON(bh->b_private);
                jh->bh = bh;
                bh->b_private = jh;
        }
@@ -973,7 +971,7 @@ int reiserfs_async_progress_wait(struct super_block *s)
        DEFINE_WAIT(wait);
        struct reiserfs_journal *j = SB_JOURNAL(s);
        if (atomic_read(&j->j_async_throttle))
-               blk_congestion_wait(WRITE, HZ / 10);
+               congestion_wait(WRITE, HZ / 10);
        return 0;
 }
 
@@ -1187,6 +1185,21 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
        return NULL;
 }
 
+static int newer_jl_done(struct reiserfs_journal_cnode *cn)
+{
+       struct super_block *sb = cn->sb;
+       b_blocknr_t blocknr = cn->blocknr;
+
+       cn = cn->hprev;
+       while (cn) {
+               if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
+                   atomic_read(&cn->jlist->j_commit_left) != 0)
+                                   return 0;
+               cn = cn->hprev;
+       }
+       return 1;
+}
+
 static void remove_journal_hash(struct super_block *,
                                struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long,
@@ -1605,6 +1618,31 @@ static int flush_journal_list(struct super_block *s,
        return err;
 }
 
+static int test_transaction(struct super_block *s,
+                            struct reiserfs_journal_list *jl)
+{
+       struct reiserfs_journal_cnode *cn;
+
+       if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
+               return 1;
+
+       cn = jl->j_realblock;
+       while (cn) {
+               /* if the blocknr == 0, this has been cleared from the hash,
+                ** skip it
+                */
+               if (cn->blocknr == 0) {
+                       goto next;
+               }
+               if (cn->bh && !newer_jl_done(cn))
+                       return 0;
+             next:
+               cn = cn->next;
+               cond_resched();
+       }
+       return 0;
+}
+
 static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
@@ -2928,8 +2966,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
        int retval;
 
        reiserfs_check_lock_depth(p_s_sb, "journal_begin");
-       if (nblocks > journal->j_trans_max)
-               BUG();
+       BUG_ON(nblocks > journal->j_trans_max);
 
        PROC_INFO_INC(p_s_sb, journal.journal_being);
        /* set here for journal_join */
@@ -3045,9 +3082,8 @@ struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
        if (reiserfs_transaction_running(s)) {
                th = current->journal_info;
                th->t_refcount++;
-               if (th->t_refcount < 2) {
-                       BUG();
-               }
+               BUG_ON(th->t_refcount < 2);
+               
                return th;
        }
        th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
@@ -3087,9 +3123,7 @@ static int journal_join(struct reiserfs_transaction_handle *th,
         ** pointer
         */
        th->t_handle_save = cur_th;
-       if (cur_th && cur_th->t_refcount > 1) {
-               BUG();
-       }
+       BUG_ON(cur_th && cur_th->t_refcount > 1);
        return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
 }
 
@@ -3102,9 +3136,7 @@ int journal_join_abort(struct reiserfs_transaction_handle *th,
         ** pointer
         */
        th->t_handle_save = cur_th;
-       if (cur_th && cur_th->t_refcount > 1) {
-               BUG();
-       }
+       BUG_ON(cur_th && cur_th->t_refcount > 1);
        return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
 }
 
@@ -3139,8 +3171,7 @@ int journal_begin(struct reiserfs_transaction_handle *th,
                current->journal_info = th;
        }
        ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
-       if (current->journal_info != th)
-               BUG();
+       BUG_ON(current->journal_info != th);
 
        /* I guess this boils down to being the reciprocal of clm-2100 above.
         * If do_journal_begin_r fails, we need to put it back, since journal_end
@@ -3285,8 +3316,7 @@ int journal_end(struct reiserfs_transaction_handle *th,
                /* we aren't allowed to close a nested transaction on a different
                 ** filesystem from the one in the task struct
                 */
-               if (cur_th->t_super != th->t_super)
-                       BUG();
+               BUG_ON(cur_th->t_super != th->t_super);
 
                if (th != cur_th) {
                        memcpy(current->journal_info, th, sizeof(*th));
@@ -3405,9 +3435,7 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
 
        BUG_ON(!th->t_trans_id);
        /* you can sync while nested, very, very bad */
-       if (th->t_refcount > 1) {
-               BUG();
-       }
+       BUG_ON(th->t_refcount > 1);
        if (journal->j_len == 0) {
                reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
                                             1);
@@ -3434,16 +3462,6 @@ static void flush_async_commits(void *p)
                flush_commit_list(p_s_sb, jl, 1);
        }
        unlock_kernel();
-       /*
-        * this is a little racey, but there's no harm in missing
-        * the filemap_fdata_write
-        */
-       if (!atomic_read(&journal->j_async_throttle)
-           && !reiserfs_is_journal_aborted(journal)) {
-               atomic_inc(&journal->j_async_throttle);
-               filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
-               atomic_dec(&journal->j_async_throttle);
-       }
 }
 
 /*
@@ -3527,9 +3545,8 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
         ** will be dealt with by next transaction that actually writes something, but should be taken
         ** care of in this trans
         */
-       if (journal->j_len == 0) {
-               BUG();
-       }
+       BUG_ON(journal->j_len == 0);
+
        /* if wcount > 0, and we are called to with flush or commit_now,
         ** we wait on j_join_wait.  We will wake up when the last writer has
         ** finished the transaction, and started it on its way to the disk.
@@ -3563,9 +3580,8 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
                                        unlock_journal(p_s_sb);
                                }
                        }
-                       if (journal->j_trans_id == trans_id) {
-                               BUG();
-                       }
+                       BUG_ON(journal->j_trans_id == trans_id);
+                       
                        if (commit_now
                            && journal_list_still_alive(p_s_sb, trans_id)
                            && wait_on_commit) {
@@ -3845,7 +3861,9 @@ static void flush_old_journal_lists(struct super_block *s)
                entry = journal->j_journal_list.next;
                jl = JOURNAL_LIST_ENTRY(entry);
                /* this check should always be run, to send old lists to disk */
-               if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
+               if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
+                   atomic_read(&jl->j_commit_left) == 0 &&
+                   test_transaction(s, jl)) {
                        flush_used_journal_lists(s, jl);
                } else {
                        break;
@@ -4043,9 +4061,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
        set_commit_trans_len(commit, journal->j_len);
 
        /* special check in case all buffers in the journal were marked for not logging */
-       if (journal->j_len == 0) {
-               BUG();
-       }
+       BUG_ON(journal->j_len == 0);
 
        /* we're about to dirty all the log blocks, mark the description block
         * dirty now too.  Don't mark the commit block dirty until all the
@@ -4142,8 +4158,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
                                      journal, jl, &jl->j_tail_bh_list);
                lock_kernel();
        }
-       if (!list_empty(&jl->j_tail_bh_list))
-               BUG();
+       BUG_ON(!list_empty(&jl->j_tail_bh_list));
        up(&jl->j_commit_lock);
 
        /* honor the flush wishes from the caller, simple commits can