/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 * The journal MUST be locked.  We don't perform atomic mallocs on the
 * new transaction and we can't block without protecting against other
 * processes trying to touch the journal while it is in transition.
 */
static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires = round_jiffies(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;

        return transaction;
}
/*
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */
static int start_this_handle(journal_t *journal, handle_t *handle)
{
        transaction_t *transaction;
        int needed;
        int nblocks = handle->h_buffer_credits;
        transaction_t *new_transaction = NULL;
        int ret = 0;
        unsigned long ts = jiffies;

        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
                       current->comm, nblocks,
                       journal->j_max_transaction_buffers);
                ret = -ENOSPC;
                goto out;
        }

alloc_transaction:
        if (!journal->j_running_transaction) {
                new_transaction = kzalloc(sizeof(*new_transaction),
                                          GFP_NOFS|__GFP_NOFAIL);
                if (!new_transaction) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        jbd_debug(3, "New handle %p going live.\n", handle);
repeat:

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
        spin_lock(&journal->j_state_lock);
repeat_locked:
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                spin_unlock(&journal->j_state_lock);
                ret = -EROFS;
                goto out;
        }

        /* Wait on the journal's transaction barrier if necessary */
        if (journal->j_barrier_count) {
                spin_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
                goto repeat;
        }
        if (!journal->j_running_transaction) {
                if (!new_transaction) {
                        spin_unlock(&journal->j_state_lock);
                        goto alloc_transaction;
                }
                jbd2_get_transaction(journal, new_transaction);
                new_transaction = NULL;
        }

        transaction = journal->j_running_transaction;
        /*
         * If the current transaction is locked down for commit, wait for the
         * lock to be released.
         */
        if (transaction->t_state == T_LOCKED) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_transaction_locked,
                                        &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }
        /*
         * If there is not enough space left in the log to write all potential
         * buffers requested by this operation, we need to stall pending a log
         * checkpoint to free some more log space.
         */
        spin_lock(&transaction->t_handle_lock);
        needed = transaction->t_outstanding_credits + nblocks;

        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large, then start
                 * to commit it: we can then go back and attach this handle to
                 * the new transaction.
                 */
                DEFINE_WAIT(wait);

                jbd_debug(2, "Handle %p starting new commit...\n", handle);
                spin_unlock(&transaction->t_handle_lock);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
                __jbd2_log_start_commit(journal, transaction->t_tid);
                spin_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                spin_lock(&journal->j_state_lock);
                goto repeat_locked;
        }
        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint.  This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         *
         * The worst part is, any transaction currently committing can
         * reduce the free space arbitrarily.  Be careful to account for
         * those buffers when checkpointing.
         */

        /*
         * @@@ AKPM: This seems rather over-defensive.  We're giving commit
         * a _lot_ of headroom: 1/4 of the journal plus the size of
         * the committing transaction.  Really, we only need to give it
         * committing_transaction->t_outstanding_credits plus "enough" for
         * the log control blocks.
         * Also, this test is inconsistent with the matching one in
         * jbd2_journal_extend().
         */
        if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
                jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
                spin_unlock(&transaction->t_handle_lock);
                __jbd2_log_wait_for_space(journal);
                goto repeat_locked;
        }
        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction. */

        if (time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
                if (ts > transaction->t_max_wait)
                        transaction->t_max_wait = ts;
        }

        handle->h_transaction = transaction;
        transaction->t_outstanding_credits += nblocks;
        transaction->t_updates++;
        transaction->t_handle_count++;
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
                  handle, nblocks, transaction->t_outstanding_credits,
                  __jbd2_log_space_left(journal));
        spin_unlock(&transaction->t_handle_lock);
        spin_unlock(&journal->j_state_lock);
out:
        if (unlikely(new_transaction))          /* It's usually NULL */
                kfree(new_transaction);
        return ret;
}
static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        memset(handle, 0, sizeof(*handle));
        handle->h_buffer_credits = nblocks;
        handle->h_ref = 1;

        lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
                                                &jbd2_handle_key, 0);

        return handle;
}
/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);

        current->journal_info = handle;

        err = start_this_handle(journal, handle);
        if (err < 0) {
                jbd2_free_handle(handle);
                current->journal_info = NULL;
                handle = ERR_PTR(err);
                goto out;
        }

        lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
out:
        return handle;
}
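/*
 * Illustrative sketch (not part of the original file): every journaled
 * update made by a client filesystem such as ext4 is bracketed by
 * jbd2_journal_start() and jbd2_journal_stop().  'journal' and the
 * credit estimate here are assumptions supplied by the caller; nested
 * calls on the same task simply bump h_ref on the existing handle.
 *
 *      handle_t *handle = jbd2_journal_start(journal, credits);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      ... journaled modifications go here ...
 *      err = jbd2_journal_stop(handle);
 */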
/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation: this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int result;
        int wanted;

        result = -EIO;
        if (is_handle_aborted(handle))
                goto out;

        result = 1;

        spin_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (handle->h_transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        spin_lock(&transaction->t_handle_lock);
        wanted = transaction->t_outstanding_credits + nblocks;

        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
                goto unlock;
        }

        if (wanted > __jbd2_log_space_left(journal)) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "insufficient log space\n", handle, nblocks);
                goto unlock;
        }

        handle->h_buffer_credits += nblocks;
        transaction->t_outstanding_credits += nblocks;
        result = 0;

        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
        spin_unlock(&transaction->t_handle_lock);
error_out:
        spin_unlock(&journal->j_state_lock);
out:
        return result;
}
/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        J_ASSERT(transaction->t_updates > 0);
        J_ASSERT(journal_current_handle() == handle);

        spin_lock(&journal->j_state_lock);
        spin_lock(&transaction->t_handle_lock);
        transaction->t_outstanding_credits -= handle->h_buffer_credits;
        transaction->t_updates--;

        if (!transaction->t_updates)
                wake_up(&journal->j_wait_updates);
        spin_unlock(&transaction->t_handle_lock);

        jbd_debug(2, "restarting handle %p\n", handle);
        __jbd2_log_start_commit(journal, transaction->t_tid);
        spin_unlock(&journal->j_state_lock);

        handle->h_buffer_credits = nblocks;
        ret = start_this_handle(journal, handle);
        return ret;
}
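/*
 * Illustrative sketch (not part of the original file): a long-running
 * operation such as truncate typically tries jbd2_journal_extend() first
 * and falls back to jbd2_journal_restart() when the transaction is full.
 * Note that buffers dirtied before the restart may commit separately
 * from those dirtied after it.
 *
 *      if (jbd2_journal_extend(handle, needed) != 0) {
 *              err = jbd2_journal_restart(handle, needed);
 *              if (err)
 *                      return err;
 *      }
 */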
/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        spin_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                spin_lock(&transaction->t_handle_lock);
                if (!transaction->t_updates) {
                        spin_unlock(&transaction->t_handle_lock);
                        break;
                }
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                spin_unlock(&transaction->t_handle_lock);
                spin_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                spin_lock(&journal->j_state_lock);
        }
        spin_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates() calls
         * to make sure that we serialise special journal-locked operations
         * too.
         */
        mutex_lock(&journal->j_barrier);
}
/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        spin_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        spin_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_transaction_locked);
}
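/*
 * Illustrative sketch (not part of the original file): operations that
 * need a quiescent journal wrap their critical section in the barrier
 * pair established above:
 *
 *      jbd2_journal_lock_updates(journal);
 *      ... no handle can start and no update is running here ...
 *      jbd2_journal_unlock_updates(journal);
 */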
/*
 * Report any unexpected dirty buffers which turn up.  Normally those
 * indicate an error, but they can occur if the user is running (say)
 * tune2fs to modify the live filesystem, so we need the option of
 * continuing as gracefully as possible.
 *
 * The caller should already hold the journal lock and
 * j_list_lock spinlock: most callers will need those anyway
 * in order to probe the buffer's journaling state safely.
 */
static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
{
        int jlist;

        /* If this buffer is one which might reasonably be dirty
         * --- ie. data, or not part of this journal --- then
         * we're OK to leave it alone, but otherwise we need to
         * move the dirty bit to the journal's own internal
         * JBDDirty bit. */
        jlist = jh->b_jlist;

        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
            jlist == BJ_Shadow || jlist == BJ_Forget) {
                struct buffer_head *bh = jh2bh(jh);

                if (test_clear_buffer_dirty(bh))
                        set_buffer_jbddirty(bh);
        }
}
/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                        int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        int need_copy = 0;

        if (is_handle_aborted(handle))
                return -EROFS;

        transaction = handle->h_transaction;
        journal = transaction->t_journal;

        jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */

        lock_buffer(bh);
        jbd_lock_bh_state(bh);

        /* We now hold the buffer lock so it is safe to query the buffer
         * state.  Is the buffer dirty?
         *
         * If so, there are two possibilities.  The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.)  So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.)  */
        if (buffer_dirty(bh)) {
                /*
                 * First question: is this buffer already part of the current
                 * transaction or the existing committing transaction?
                 */
                if (jh->b_transaction) {
                        J_ASSERT_JH(jh,
                                jh->b_transaction == transaction ||
                                jh->b_transaction ==
                                        journal->j_committing_transaction);
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
                JBUFFER_TRACE(jh, "Unexpected dirty buffer");
                jbd_unexpected_dirty_buffer(jh);
        }

        unlock_buffer(bh);

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                jbd_unlock_bh_state(bh);
                goto out;
        }
        error = 0;
        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction)
                goto done;

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                jh->b_next_transaction = transaction;
                goto done;
        }
        /* Is there data here we need to preserve? */

        if (jh->b_transaction && jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "owned by older transaction");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);

                /* There is one case we have to be very careful about.
                 * If the committing transaction is currently writing
                 * this buffer out to disk and has NOT made a copy-out,
                 * then we cannot modify the buffer contents at all
                 * right now.  The essence of copy-out is that it is the
                 * extra copy, not the primary copy, which gets
                 * journaled.  If the primary copy is already going to
                 * disk then we cannot do copy-out here. */

                if (jh->b_jlist == BJ_Shadow) {
                        DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
                        wait_queue_head_t *wqh;

                        wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

                        JBUFFER_TRACE(jh, "on shadow: sleep");
                        jbd_unlock_bh_state(bh);
                        /* commit wakes up all shadow buffers after IO */
                        for ( ; ; ) {
                                prepare_to_wait(wqh, &wait.wait,
                                                TASK_UNINTERRUPTIBLE);
                                if (jh->b_jlist != BJ_Shadow)
                                        break;
                                schedule();
                        }
                        finish_wait(wqh, &wait.wait);
                        goto repeat;
                }
                /* Only do the copy if the currently-owning transaction
                 * still needs it.  If it is on the Forget list, the
                 * committing transaction is past that stage.  The
                 * buffer had better remain locked during the kmalloc,
                 * but that should be true --- we hold the journal lock
                 * still and the buffer is already on the BUF_JOURNAL
                 * list so won't be flushed.
                 *
                 * Subtle point, though: if this is a get_undo_access,
                 * then we will be relying on the frozen_data to contain
                 * the new value of the committed_data record after the
                 * transaction, so we HAVE to force the frozen_data copy
                 * in that case. */

                if (jh->b_jlist != BJ_Forget || force_copy) {
                        JBUFFER_TRACE(jh, "generate frozen data");
                        if (!frozen_buffer) {
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
                                jbd_unlock_bh_state(bh);
                                frozen_buffer =
                                        jbd2_alloc(jh2bh(jh)->b_size,
                                                   GFP_NOFS);
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
                                               __func__);
                                        JBUFFER_TRACE(jh, "oom!");
                                        error = -ENOMEM;
                                        jbd_lock_bh_state(bh);
                                        goto done;
                                }
                                goto repeat;
                        }
                        jh->b_frozen_data = frozen_buffer;
                        frozen_buffer = NULL;
                        need_copy = 1;
                }
                jh->b_next_transaction = transaction;
        }
        /*
         * Finally, if the buffer is not journaled right now, we need to make
         * sure it doesn't get written to disk before the caller actually
         * commits the new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                jh->b_transaction = transaction;
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        }
done:
        if (need_copy) {
                struct page *page;
                int offset;
                char *source;

                J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
                offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
                source = kmap_atomic(page, KM_USER0);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source, KM_USER0);
        }
        jbd_unlock_bh_state(bh);

        /*
         * If we are about to journal a buffer, then any revoke pending on it is
         * relevant to the new running transaction, not the old one under which
         * it was previously buffered, so cancel the revoke now.
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}
/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh: bh to be used for metadata writes
 * @credits: variable that will receive credits for the buffer
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int rc;

        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}
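/*
 * Illustrative sketch (not part of the original file): write access must
 * be obtained *before* the buffer contents are touched, so that any
 * copy-out needed for the committing transaction happens first.  'handle'
 * and 'bh' are assumed to be set up by the calling filesystem.
 *
 *      err = jbd2_journal_get_write_access(handle, bh);
 *      if (err)
 *              goto out;
 *      ... modify bh->b_data ...
 *      err = jbd2_journal_dirty_metadata(handle, bh);
 */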
/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code.  It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                          jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                jh->b_transaction = transaction;

                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
        }
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);

        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata.  We need
         * to cancel the revoke in case we end up freeing it yet again
         * and then reallocating it as data - this would cause a second
         * revoke, which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
        jbd2_journal_put_journal_head(jh);
out:
        return err;
}
/**
 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 * @credits: store the number of taken credits here (if not NULL)
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space: we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of: buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        char *committed_data = NULL;

        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data) {
                committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
                                __func__);
                        err = -ENOMEM;
                        goto out;
                }
        }

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        jbd_unlock_bh_state(bh);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        jbd_unlock_bh_state(bh);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
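/*
 * Illustrative sketch (not part of the original file): a block allocator
 * would take undo access on a bitmap buffer before clearing bits in it,
 * so that allocation decisions can keep consulting b_committed_data (the
 * committed, on-disk state) until the freeing transaction commits.
 * 'bitmap_bh' is an assumed caller-supplied buffer.
 *
 *      err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *      if (err)
 *              goto out;
 *      ... clear bits in bitmap_bh->b_data; jh->b_committed_data
 *      ... still holds the pre-modification copy
 *      err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */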
/**
 * int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which
 *     needs to be flushed before we can commit the current transaction.
 * @handle: transaction
 * @bh: bufferhead to mark
 *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
 * by kswapd.
 */
int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
        journal_t *journal = handle->h_transaction->t_journal;
        int need_brelse = 0;
        struct journal_head *jh;

        if (is_handle_aborted(handle))
                return 0;

        jh = jbd2_journal_add_journal_head(bh);
        JBUFFER_TRACE(jh, "entry");

        /*
         * The buffer could *already* be dirty.  Writeout can start
         * before we return from this function.
         */
        jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
        /*
         * What if the buffer is already part of a running transaction?
         *
         * There are two cases:
         * 1) It is part of the current running transaction.  Refile it,
         *    just in case we have allocated it as metadata, deallocated
         *    it, then reallocated it as data.
         * 2) It is part of the previous, still-committing transaction.
         *    If all we want to do is to guarantee that the buffer will be
         *    written to disk before this new transaction commits, then
         *    being sure that the *previous* transaction has this same
         *    property is sufficient for us!  Just leave it on its old
         *    transaction.
         *
         * In case (2), the buffer must not already exist as metadata
         * --- that would violate write ordering (a transaction is free
         * to write its data at any point, even before the previous
         * committing transaction has committed).  The caller must
         * never, ever allow this to happen: there's nothing we can do
         * about it in this layer.
         */
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        /* Now that we have bh_state locked, are we really still mapped? */
        if (!buffer_mapped(bh)) {
                JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
                goto no_journal;
        }

        if (jh->b_transaction) {
                JBUFFER_TRACE(jh, "has transaction");
                if (jh->b_transaction != handle->h_transaction) {
                        JBUFFER_TRACE(jh, "belongs to older transaction");
                        J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);
                        /* @@@ IS THIS TRUE  ? */
                        /*
                         * Not any more.  Scenario: someone does a write()
                         * in data=journal mode.  The buffer's transaction has
                         * moved into commit.  Then someone does another
                         * write() to the file.  We do the frozen data copyout
                         * and set b_next_transaction to point to j_running_t.
                         * And while we're in that state, someone does a
                         * writepage() in an attempt to pageout the same area
                         * of the file via a shared mapping.  At present that
                         * calls jbd2_journal_dirty_data(), and we get right here.
                         * It may be too late to journal the data.  Simply
                         * falling through to the next test will suffice: the
                         * data will be dirty and will be checkpointed.  The
                         * ordering comments in the next comment block still
                         * apply.
                         */
                        //J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                        /*
                         * If we're journalling data, and this buffer was
                         * subject to a write(), it could be metadata, forget
                         * or shadow against the committing transaction.  Now,
                         * someone has dirtied the same darn page via a mapping
                         * and it is being writepage()'d.
                         * We *could* just steal the page from commit, with some
                         * fancy locking there.  Instead, we just skip it -
                         * don't tie the page's buffers to the new transaction
                         * at all.
                         *
                         * Implication: if we crash before the writepage() data
                         * is written into the filesystem, recovery will replay
                         * the write() data.
                         */
                        if (jh->b_jlist != BJ_None &&
                            jh->b_jlist != BJ_SyncData &&
                            jh->b_jlist != BJ_Locked) {
                                JBUFFER_TRACE(jh, "Not stealing");
                                goto no_journal;
                        }
                        /*
                         * This buffer may be undergoing writeout in commit.  We
                         * can't return from here and let the caller dirty it
                         * again because that can cause the write-out loop in
                         * commit to never terminate.
                         */
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                need_brelse = 1;
                                sync_dirty_buffer(bh);
                                jbd_lock_bh_state(bh);
                                spin_lock(&journal->j_list_lock);
                                /* Since we dropped the lock... */
                                if (!buffer_mapped(bh)) {
                                        JBUFFER_TRACE(jh, "buffer got unmapped");
                                        goto no_journal;
                                }
                                /* The buffer may become locked again at any
                                   time if it is redirtied */
                        }
                        /* journal_clean_data_list() may have got there first */
                        if (jh->b_transaction != NULL) {
                                JBUFFER_TRACE(jh, "unfile from commit");
                                __jbd2_journal_temp_unlink_buffer(jh);
                                /* It still points to the committing
                                 * transaction; move it to this one so
                                 * that the refile assert checks are
                                 * happy. */
                                jh->b_transaction = handle->h_transaction;
                        }
                        /* The buffer will be refiled below */
                }
                /*
                 * Special case --- the buffer might actually have been
                 * allocated and then immediately deallocated in the previous,
                 * committing transaction, so might still be left on that
                 * transaction's metadata lists.
                 */
                if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
                        JBUFFER_TRACE(jh, "not on correct data list: unfile");
                        J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
                        __jbd2_journal_temp_unlink_buffer(jh);
                        jh->b_transaction = handle->h_transaction;
                        JBUFFER_TRACE(jh, "file as data");
                        __jbd2_journal_file_buffer(jh, handle->h_transaction,
                                                BJ_SyncData);
                }
        } else {
                JBUFFER_TRACE(jh, "not on a transaction");
                __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
        }
no_journal:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        if (need_brelse) {
                BUFFER_TRACE(bh, "brelse");
                __brelse(bh);
        }
        JBUFFER_TRACE(jh, "exit");
        jbd2_journal_put_journal_head(jh);
        return 0;
}
/**
 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = bh2jh(bh);

        jbd_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");
        if (is_handle_aborted(handle))
                goto out;

        jbd_lock_bh_state(bh);

        if (jh->b_modified == 0) {
                /*
                 * This buffer has been modified and is becoming part of
                 * the transaction.  This needs to be done once per
                 * transaction -bzzz
                 */
                jh->b_modified = 1;
                J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
                handle->h_buffer_credits--;
        }
        /*
         * fastpath, to avoid expensive locking.  If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_running_transaction);
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);
        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed.  Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);
                J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        jbd_unlock_bh_state(bh);
out:
        JBUFFER_TRACE(jh, "exit");
        return 0;
}
/*
 * jbd2_journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 */
void
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
        BUFFER_TRACE(bh, "entry");
}
/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
        int was_modified = 0;

        BUFFER_TRACE(bh, "entry");

        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        if (!buffer_jbd(bh))
                goto not_jbd;
        jh = bh2jh(bh);

        /* Critical error: attempting to delete a bitmap buffer, maybe?
         * Don't do any jbd operations, and return an error. */
        if (!J_EXPECT_JH(jh, !jh->b_committed_data,
                         "inconsistent data on disk")) {
                err = -EIO;
                goto not_jbd;
        }

        /* keep track of whether or not this transaction modified us */
        was_modified = jh->b_modified;
        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
         */
        jh->b_modified = 0;

        if (jh->b_transaction == handle->h_transaction) {
                J_ASSERT_JH(jh, !jh->b_frozen_data);

                /* If we are forgetting a buffer which is already part
                 * of this transaction, then we can just drop it from
                 * the transaction immediately. */
                clear_buffer_dirty(bh);
                clear_buffer_jbddirty(bh);

                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

                /*
                 * we only want to drop a reference if this transaction
                 * modified the buffer
                 */
                if (was_modified)
                        drop_reserve = 1;

                /*
                 * We are no longer going to journal this buffer.
                 * However, the commit of this transaction is still
                 * important to the buffer: the delete that we are now
                 * processing might obsolete an old log entry, so by
                 * committing, we can satisfy the buffer's checkpoint.
                 *
                 * So, if we have a checkpoint on the buffer, we should
                 * now refile the buffer on our BJ_Forget list so that
                 * we know to remove the checkpoint after we commit.
                 */
                if (jh->b_cp_transaction) {
                        __jbd2_journal_temp_unlink_buffer(jh);
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
                        jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                __bforget(bh);
                                goto drop;
                        }
                }
        } else if (jh->b_transaction) {
                J_ASSERT_JH(jh, (jh->b_transaction ==
                                 journal->j_committing_transaction));
                /* However, if the buffer is still owned by a prior
                 * (committing) transaction, we can't drop it yet... */
                JBUFFER_TRACE(jh, "belongs to older transaction");
                /* ... but we CAN drop it from the new transaction if we
                 * have also modified it since the original commit. */

                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction == transaction);
                        jh->b_next_transaction = NULL;

                        /*
                         * only drop a reference if this transaction modified
                         * the buffer
                         */
                        if (was_modified)
                                drop_reserve = 1;
                }
        }

not_jbd:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        __brelse(bh);
drop:
        if (drop_reserve) {
                /* no need to reserve log space for this block -bzzz */
                handle->h_buffer_credits++;
        }
        return err;
}
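/*
 * Illustrative sketch (not part of the original file): when a filesystem
 * frees a metadata block inside a transaction it forgets the buffer
 * rather than dirtying it, so a block allocated and freed within one
 * transaction is never needlessly written to the log:
 *
 *      err = jbd2_journal_forget(handle, bh);  // consumes one bh ref
 */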
/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int old_handle_count, err;
        pid_t pid;

        J_ASSERT(journal_current_handle() == handle);

        if (is_handle_aborted(handle))
                err = -EIO;
        else {
                J_ASSERT(transaction->t_updates > 0);
                err = 0;
        }

        if (--handle->h_ref > 0) {
                jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
                          handle->h_ref);
                return err;
        }

        jbd_debug(4, "Handle %p going down\n", handle);

        /*
         * Implement synchronous transaction batching.  If the handle
         * was synchronous, don't force a commit immediately.  Let's
         * yield and let another thread piggyback onto this transaction.
         * Keep doing that while new threads continue to arrive.
         * It doesn't cost much - we're about to run a commit and sleep
         * on IO anyway.  Speeds up many-threaded, many-dir operations
         * by 30x or more...
         *
         * But don't do this if this process was the most recent one to
         * perform a synchronous write.  We do this to detect the case where a
         * single process is doing a stream of sync writes.  No point in waiting
         * for joiners in that case.
         */
        pid = current->pid;
        if (handle->h_sync && journal->j_last_sync_writer != pid) {
                journal->j_last_sync_writer = pid;
                do {
                        old_handle_count = transaction->t_handle_count;
                        schedule_timeout_uninterruptible(1);
                } while (old_handle_count != transaction->t_handle_count);
        }
        current->journal_info = NULL;
        spin_lock(&journal->j_state_lock);
        spin_lock(&transaction->t_handle_lock);
        transaction->t_outstanding_credits -= handle->h_buffer_credits;
        transaction->t_updates--;
        if (!transaction->t_updates) {
                wake_up(&journal->j_wait_updates);
                if (journal->j_barrier_count)
                        wake_up(&journal->j_wait_transaction_locked);
        }
        /*
         * If the handle is marked SYNC, we need to set another commit
         * going!  We also want to force a commit if the current
         * transaction is occupying too much of the log, or if the
         * transaction is too old now.
         */
        if (handle->h_sync ||
                        transaction->t_outstanding_credits >
                                journal->j_max_transaction_buffers ||
                        time_after_eq(jiffies, transaction->t_expires)) {
                /* Do this even for aborted journals: an abort still
                 * completes the commit thread, it just doesn't write
                 * anything to disk. */
                tid_t tid = transaction->t_tid;

                spin_unlock(&transaction->t_handle_lock);
                jbd_debug(2, "transaction too old, requesting commit for "
                                        "handle %p\n", handle);
                /* This is non-blocking */
                __jbd2_log_start_commit(journal, transaction->t_tid);
                spin_unlock(&journal->j_state_lock);

                /*
                 * Special case: JBD2_SYNC synchronous updates require us
                 * to wait for the commit to complete.
                 */
                if (handle->h_sync && !(current->flags & PF_MEMALLOC))
                        err = jbd2_log_wait_commit(journal, tid);
        } else {
                spin_unlock(&transaction->t_handle_lock);
                spin_unlock(&journal->j_state_lock);
        }

        lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);

        jbd2_free_handle(handle);
        return err;
}
/**
 * int jbd2_journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int jbd2_journal_force_commit(journal_t *journal)
{
        handle_t *handle;
        int ret;

        handle = jbd2_journal_start(journal, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
        } else {
                handle->h_sync = 1;
                ret = jbd2_journal_stop(handle);
        }
        return ret;
}
/*
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */
static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
        if (!*list) {
                jh->b_tnext = jh->b_tprev = jh;
                *list = jh;
        } else {
                /* Insert at the tail of the list to preserve order */
                struct journal_head *first = *list, *last = first->b_tprev;
                jh->b_tprev = last;
                jh->b_tnext = first;
                last->b_tnext = first->b_tprev = jh;
        }
}
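/*
 * The transaction lists are circular, doubly-linked lists threaded
 * through b_tnext/b_tprev, with *list pointing at the head element, so
 * first->b_tprev is the current tail and the combined assignment in
 * __blist_add_buffer() above links the new element in as the new tail.
 */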
/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
        if (*list == jh) {
                *list = jh->b_tnext;
                if (*list == jh)
                        *list = NULL;
        }
        jh->b_tprev->b_tnext = jh->b_tnext;
        jh->b_tnext->b_tprev = jh->b_tprev;
}
/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
 * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
 * is holding onto a copy of one of these pointers, it could go bad.
 * Generally the caller needs to re-read the pointer from the transaction_t.
 *
 * Called under j_list_lock.  The journal may not be locked.
 */
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
        struct journal_head **list = NULL;
        transaction_t *transaction;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        transaction = jh->b_transaction;
        if (transaction)
                assert_spin_locked(&transaction->t_journal->j_list_lock);

        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
        if (jh->b_jlist != BJ_None)
                J_ASSERT_JH(jh, transaction != NULL);

        switch (jh->b_jlist) {
        case BJ_None:
                return;
        case BJ_SyncData:
                list = &transaction->t_sync_datalist;
                break;
        case BJ_Metadata:
                transaction->t_nr_buffers--;
                J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
                list = &transaction->t_buffers;
                break;
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
        case BJ_IO:
                list = &transaction->t_iobuf_list;
                break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
        case BJ_LogCtl:
                list = &transaction->t_log_list;
                break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
        case BJ_Locked:
                list = &transaction->t_locked_list;
                break;
        }

        __blist_del_buffer(list, jh);
        jh->b_jlist = BJ_None;
        if (test_clear_buffer_jbddirty(bh))
                mark_buffer_dirty(bh);  /* Expose it to the VM */
}
void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = NULL;
}

void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_unfile_buffer(jh);
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
}
/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
        struct journal_head *jh;

        jh = bh2jh(bh);

        if (buffer_locked(bh) || buffer_dirty(bh))
                goto out;

        if (jh->b_next_transaction != NULL)
                goto out;

        spin_lock(&journal->j_list_lock);
        if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
                if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
                        /* A written-back ordered data buffer */
                        JBUFFER_TRACE(jh, "release data");
                        __jbd2_journal_unfile_buffer(jh);
                        jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                }
        } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
                /* written-back checkpointed metadata buffer */
                if (jh->b_jlist == BJ_None) {
                        JBUFFER_TRACE(jh, "remove from checkpoint list");
                        __jbd2_journal_remove_checkpoint(jh);
                        jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                }
        }
        spin_unlock(&journal->j_list_lock);
out:
        return;
}
/*
 * jbd2_journal_try_to_free_buffers() could race with
 * jbd2_journal_commit_transaction().  The latter might still hold the
 * reference count to the buffers when inspecting them on
 * t_sync_datalist or t_locked_list.
 *
 * jbd2_journal_try_to_free_buffers() will call this function to
 * wait for the current transaction to finish syncing data buffers, before
 * trying to free that buffer.
 *
 * Called without journal->j_state_lock held; the function takes and
 * releases that lock itself.
 */
static void jbd2_journal_wait_for_transaction_sync_data(journal_t *journal)
{
        transaction_t *transaction;
        tid_t tid;

        spin_lock(&journal->j_state_lock);
        transaction = journal->j_committing_transaction;

        if (!transaction) {
                spin_unlock(&journal->j_state_lock);
                return;
        }

        tid = transaction->t_tid;
        spin_unlock(&journal->j_state_lock);
        jbd2_log_wait_commit(journal, tid);
}
/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard we should try to release
 * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for commit code to
 * release the buffers.
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat. We aren't protected by the
 * BKL here. We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
                                struct page *page, gfp_t gfp_mask)
{
        struct buffer_head *head;
        struct buffer_head *bh;
        int ret = 0;

        J_ASSERT(PageLocked(page));

        head = page_buffers(page);
        bh = head;
        do {
                struct journal_head *jh;

                /*
                 * We take our own ref against the journal_head here to avoid
                 * having to add tons of locking around each instance of
                 * jbd2_journal_remove_journal_head() and
                 * jbd2_journal_put_journal_head().
                 */
                jh = jbd2_journal_grab_journal_head(bh);
                if (!jh)
                        continue;

                jbd_lock_bh_state(bh);
                __journal_try_to_free_buffer(journal, bh);
                jbd2_journal_put_journal_head(jh);
                jbd_unlock_bh_state(bh);
                if (buffer_jbd(bh))
                        goto busy;
        } while ((bh = bh->b_this_page) != head);
        ret = try_to_free_buffers(page);

        /*
         * There are a number of places where jbd2_journal_try_to_free_buffers()
         * could race with jbd2_journal_commit_transaction(); the latter still
         * holds the reference to the buffers to free while processing them.
         * try_to_free_buffers() then fails to free those buffers.  Some
         * callers of releasepage() (such as generic_file_direct_IO()) want
         * page buffers to be dropped, and otherwise treat the failure to
         * free them as an error.
         *
         * So, if the caller of try_to_release_page() wants the synchronous
         * behaviour (i.e. make sure buffers are dropped upon return),
         * let's wait for the current transaction to finish flush of
         * dirty data buffers, then try to free those buffers again,
         * with the journal locked.
         */
        if (ret == 0 && (gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)) {
                jbd2_journal_wait_for_transaction_sync_data(journal);
                ret = try_to_free_buffers(page);
        }

busy:
        return ret;
}
/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 *
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);

        __jbd2_journal_unfile_buffer(jh);

        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
                __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                clear_buffer_jbddirty(bh);
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
                jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        }
        return may_free;
}
/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage
 * on the data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */
/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
        transaction_t *transaction;
        struct journal_head *jh;
        int may_free = 1;
        int ret;

        BUFFER_TRACE(bh, "entry");

        /*
         * It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
         * holding the page lock. --sct
         */

        if (!buffer_jbd(bh))
                goto zap_buffer_unlocked;

        spin_lock(&journal->j_state_lock);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        jh = jbd2_journal_grab_journal_head(bh);
        if (!jh)
                goto zap_buffer_no_jh;
        transaction = jh->b_transaction;
        if (transaction == NULL) {
                /* First case: not on any transaction.  If it
                 * has no checkpoint link, then we can zap it:
                 * it's a writeback-mode buffer so we don't care
                 * if it hits disk safely. */
                if (!jh->b_cp_transaction) {
                        JBUFFER_TRACE(jh, "not on any transaction: zap");
                        goto zap_buffer;
                }

                if (!buffer_dirty(bh)) {
                        /* bdflush has written it.  We can drop it now */
                        goto zap_buffer;
                }

                /* OK, it must be in the journal but still not
                 * written fully to disk: it's metadata or
                 * journaled data... */

                if (journal->j_running_transaction) {
                        /* ... and once the current transaction has
                         * committed, the buffer won't be needed any
                         * longer. */
                        JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
                        ret = __dispose_buffer(jh,
                                        journal->j_running_transaction);
                        jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        spin_unlock(&journal->j_state_lock);
                        return ret;
                } else {
                        /* There is no currently-running transaction. So the
                         * orphan record which we wrote for this file must have
                         * passed into commit.  We must attach this buffer to
                         * the committing transaction, if it exists. */
                        if (journal->j_committing_transaction) {
                                JBUFFER_TRACE(jh, "give to committing trans");
                                ret = __dispose_buffer(jh,
                                        journal->j_committing_transaction);
                                jbd2_journal_put_journal_head(jh);
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                spin_unlock(&journal->j_state_lock);
                                return ret;
                        } else {
                                /* The orphan record's transaction has
                                 * committed.  We can cleanse this buffer */
                                clear_buffer_jbddirty(bh);
                                goto zap_buffer;
                        }
                }
        } else if (transaction == journal->j_committing_transaction) {
                JBUFFER_TRACE(jh, "on committing transaction");
                if (jh->b_jlist == BJ_Locked) {
                        /*
                         * The buffer is on the committing transaction's locked
                         * list.  We have the buffer locked, so I/O has
                         * completed.  So we can nail the buffer now.
                         */
                        may_free = __dispose_buffer(jh, transaction);
                        goto zap_buffer;
                }
                /*
                 * If it is committing, we simply cannot touch it.  We
                 * can remove its next_transaction pointer from the
                 * running transaction if that is set, but nothing
                 * else.
                 */
                set_buffer_freed(bh);
                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction ==
                                        journal->j_running_transaction);
                        jh->b_next_transaction = NULL;
                }
                jbd2_journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                spin_unlock(&journal->j_state_lock);
                return 0;
        } else {
                /* Good, the buffer belongs to the running transaction.
                 * We are writing our own transaction's data, not any
                 * previous one's, so it is safe to throw it away
                 * (remember that we expect the filesystem to have set
                 * i_size already for this truncate so recovery will not
                 * expose the disk blocks we are discarding here.) */
                J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
                JBUFFER_TRACE(jh, "on running transaction");
                may_free = __dispose_buffer(jh, transaction);
        }

zap_buffer:
        jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
        clear_buffer_dirty(bh);
        J_ASSERT_BH(bh, !buffer_jbddirty(bh));
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        bh->b_bdev = NULL;
        return may_free;
}
/**
 * void jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 *
 * Reap page buffers containing data after offset in page.
 */
void jbd2_journal_invalidatepage(journal_t *journal,
                      struct page *page,
                      unsigned long offset)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int may_free = 1;

        if (!PageLocked(page))
                BUG();
        if (!page_has_buffers(page))
                return;

        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
         * cautious in our locking. */

        head = bh = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
                        may_free &= journal_unmap_buffer(journal, bh);
                        unlock_buffer(bh);
                }
                curr_off = next_off;
                bh = next;

        } while (bh != head);
        if (may_free && try_to_free_buffers(page))
                J_ASSERT(!page_has_buffers(page));
}
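/*
 * Illustrative sketch (not part of the original file): a journaling
 * filesystem wires this into its address_space_operations so that
 * truncate flows through jbd2, e.g.:
 *
 *      static void example_invalidatepage(struct page *page,
 *                                         unsigned long offset)
 *      {
 *              journal_t *journal = EXAMPLE_JOURNAL(page->mapping->host);
 *
 *              jbd2_journal_invalidatepage(journal, page, offset);
 *      }
 *
 * EXAMPLE_JOURNAL() stands in for whatever helper returns the inode's
 * journal; it is hypothetical, not a jbd2 API.
 */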
/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
                        transaction_t *transaction, int jlist)
{
        struct journal_head **list = NULL;
        int was_dirty = 0;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        assert_spin_locked(&transaction->t_journal->j_list_lock);

        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
        J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_transaction == NULL);

        if (jh->b_transaction && jh->b_jlist == jlist)
                return;

        /* The following list of buffer states needs to be consistent
         * with jbd_unexpected_dirty_buffer()'s handling of dirty
         * states. */
        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
            jlist == BJ_Shadow || jlist == BJ_Forget) {
                if (test_clear_buffer_dirty(bh) ||
                    test_clear_buffer_jbddirty(bh))
                        was_dirty = 1;
        }

        if (jh->b_transaction)
                __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = transaction;

        switch (jlist) {
        case BJ_None:
                J_ASSERT_JH(jh, !jh->b_committed_data);
                J_ASSERT_JH(jh, !jh->b_frozen_data);
                return;
        case BJ_SyncData:
                list = &transaction->t_sync_datalist;
                break;
        case BJ_Metadata:
                transaction->t_nr_buffers++;
                list = &transaction->t_buffers;
                break;
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
        case BJ_IO:
                list = &transaction->t_iobuf_list;
                break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
        case BJ_LogCtl:
                list = &transaction->t_log_list;
                break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
        case BJ_Locked:
                list = &transaction->t_locked_list;
                break;
        }

        __blist_add_buffer(list, jh);
        jh->b_jlist = jlist;

        if (was_dirty)
                set_buffer_jbddirty(bh);
}
void jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
{
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&transaction->t_journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, jlist);
        spin_unlock(&transaction->t_journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
}
/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under journal->j_list_lock
 *
 * Called under jbd_lock_bh_state(jh2bh(jh))
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
        int was_dirty;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        if (jh->b_transaction)
                assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

        /* If the buffer is now unused, just drop it. */
        if (jh->b_next_transaction == NULL) {
                __jbd2_journal_unfile_buffer(jh);
                return;
        }

        /*
         * It has been modified by a later transaction: add it to the new
         * transaction's metadata list.
         */
        was_dirty = test_clear_buffer_jbddirty(bh);
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        __jbd2_journal_file_buffer(jh, jh->b_transaction,
                                jh->b_modified ? BJ_Metadata : BJ_Reserved);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

        if (was_dirty)
                set_buffer_jbddirty(bh);
}
/*
 * For the unlocked version of this call, also make sure that any
 * hanging journal_head is cleaned up if necessary.
 *
 * __jbd2_journal_refile_buffer is usually called as part of a single locked
 * operation on a buffer_head, in which the caller is probably going to
 * be hooking the journal_head onto other lists.  In that case it is up
 * to the caller to remove the journal_head if necessary.  For the
 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
 * doing anything else to the buffer so we need to do the cleanup
 * ourselves to avoid a jh leak.
 *
 * *** The journal_head may be freed by this call! ***
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        __jbd2_journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
        jbd2_journal_remove_journal_head(bh);

        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
}