/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/marker.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}
/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	int barrier_done = 0;
	struct timespec now = current_kernel_time();

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = submit_bh(WRITE, bh);
	if (barrier_done)
		clear_buffer_ordered(bh);

	/* is it possible for another commit to fail at roughly
	 * the same time as this one?  If so, we don't want to
	 * trust the barrier flag in the super, but instead want
	 * to remember if we sent a barrier request
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		printk(KERN_WARNING
		       "JBD: barrier-based sync failed on %s - "
		       "disabling barriers\n", journal->j_devname);
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		lock_buffer(bh);
		set_buffer_uptodate(bh);
		clear_buffer_dirty(bh);
		ret = submit_bh(WRITE, bh);
	}
	*cbh = bh;
	return ret;
}
/*
 * This function along with journal_submit_commit_record
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}
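
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * of how the submit/wait pair above is meant to be used.  With
 * JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT the commit record is submitted
 * before the transaction's data writes are waited on, and the wait is
 * deferred; otherwise submit and wait happen back to back.  The helper
 * name example_write_commit_record() is hypothetical.
 */
#if 0
static int example_write_commit_record(journal_t *journal,
				       transaction_t *commit_transaction,
				       __u32 crc32_sum)
{
	struct buffer_head *cbh = NULL;
	int err;

	/* Submit the commit block; the caller may do this early for
	 * async commits. */
	err = journal_submit_commit_record(journal, commit_transaction,
					   &cbh, crc32_sum);
	if (err)
		return err;

	/* The wait can be deferred arbitrarily; the caller decides when
	 * the commit must be durable. */
	return journal_wait_on_commit_record(cbh);
}
#endif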
/*
 * Write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
		.for_writepages = 1,
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * currently operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
				       transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * submit the inode data buffers. We use writepage
		 * instead of writepages. Because writepages can do
		 * block allocation with delalloc. We need to write
		 * only allocated blocks here.
		 */
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
/*
 * Wait for data submitted for writeout, refile inodes to the proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
					     transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * wait_on_page_writeback_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inodes to the proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				 &jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}
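
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The commit checksum is a single crc32 chained block by block over every
 * journal block written for the transaction, seeded with ~0 (as crc32_sum
 * is below) and finally stored in the commit header by
 * journal_submit_commit_record().  The helper below is hypothetical and
 * only demonstrates the chaining.
 */
#if 0
static __u32 example_checksum_run(struct buffer_head **bhs, int nr)
{
	__u32 csum = ~0;	/* same seed as crc32_sum in the commit path */
	int i;

	for (i = 0; i < nr; i++)
		csum = jbd2_checksum_data(csum, bhs[i]);
	return csum;
}
#endif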
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
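
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * showing how a 64-bit block number is reassembled from a tag written by
 * write_tag_block() above: the low 32 bits live in t_blocknr and, for
 * 64-bit journals (tag_bytes > JBD2_TAG_SIZE32), the high bits in
 * t_blocknr_high.  E.g. block 0x123456789 is stored as t_blocknr =
 * 0x23456789 and t_blocknr_high = 0x1.  Recovery performs this decoding
 * in fs/jbd2/recovery.c; this hypothetical helper only mirrors the
 * encoding for clarity.
 */
#if 0
static unsigned long long example_read_tag_block(int tag_bytes,
						 journal_block_tag_t *tag)
{
	unsigned long long block = be32_to_cpu(tag->t_blocknr);

	if (tag_bytes > JBD2_TAG_SIZE32)
		block |= (unsigned long long)
			 be32_to_cpu(tag->t_blocknr_high) << 32;
	return block;
}
#endif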
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif
	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_mark(jbd2_start_commit, "dev %s transaction %d",
		   journal->j_devname, commit_transaction->t_tid);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
		  commit_transaction->t_tid);
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	stats.u.run.rs_wait = commit_transaction->t_max_wait;
	stats.u.run.rs_locked = jiffies;
	stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
						stats.u.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);
	J_ASSERT(commit_transaction->t_outstanding_credits <=
		 journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}
	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);
	stats.u.run.rs_flushing = jiffies;
	stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
					       stats.u.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);
	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_journal_write_revoke_records(journal, commit_transaction);

	jbd_debug(3, "JBD: commit phase 2\n");
	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	stats.u.run.rs_logging = jiffies;
	stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
						 stats.u.run.rs_logging);
	stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
	stats.u.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);
	err = 0;
	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it for background writing. */

		if (is_journal_aborted(journal)) {
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}
		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}
		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;
		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer). new_bh goes on BJ_IO */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);
		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}
		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE, bh);
			}
			cond_resched();
			stats.u.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
	/* Done it all: now write the commit record asynchronously. */

	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	/*
	 * This is the right place to wait for data buffers both for ASYNC
	 * and !ASYNC commit. If commit is ASYNC, we need to wait only after
	 * the commit block went to disk (which happens above). If commit is
	 * SYNC, we need to wait for data buffers before we start writing
	 * commit block, which happens below in such setting.
	 */
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
		       "JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		err = 0;
	}
	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/
	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);
		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	jbd_debug(3, "JBD: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);
	jbd_debug(3, "JBD: commit phase 5\n");

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (!err && !is_journal_aborted(journal))
		err = journal_wait_on_commit_record(cbh);

	if (err)
		jbd2_journal_abort(journal, err);
	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);
restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}
	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_start = jiffies;
	stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging,
						commit_transaction->t_start);

	/*
	 * File the transaction for history
	 */
	stats.ts_type = JBD2_STATS_RUN;
	stats.ts_tid = commit_transaction->t_tid;
	stats.u.run.rs_handle_count = commit_transaction->t_handle_count;
	spin_lock(&journal->j_history_lock);
	memcpy(journal->j_history + journal->j_history_cur, &stats,
	       sizeof(stats));
	if (++journal->j_history_cur == journal->j_history_max)
		journal->j_history_cur = 0;

	/*
	 * Calculate overall stats
	 */
	journal->j_stats.ts_tid++;
	journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait;
	journal->j_stats.u.run.rs_running += stats.u.run.rs_running;
	journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked;
	journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing;
	journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging;
	journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count;
	journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks;
	journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	trace_mark(jbd2_end_commit, "dev %s transaction %d head %d",
		   journal->j_devname, commit_transaction->t_tid,
		   journal->j_tail_sequence);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}