/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Defines functions of the journalling API.
 *
 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>

#define MLOG_MASK_PREFIX ML_JOURNAL
#include <cluster/masklog.h>

#include "extent_map.h"
#include "heartbeat.h"
#include "localalloc.h"
#include "buffer_head_io.h"
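
/* Serializes updates of the journal transaction id (see
 * ocfs2_inc_trans_id() and friends in journal.h). */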
DEFINE_SPINLOCK(trans_inc_lock);

static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
			      int node_num, int slot_num);
static int __ocfs2_recovery_thread(void *arg);
static int ocfs2_commit_cache(struct ocfs2_super *osb);
static int ocfs2_wait_on_mount(struct ocfs2_super *osb);
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
				      int dirty, int replayed);
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
				 int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
				 int slot);
static int ocfs2_commit_thread(void *arg);

/*
 * The recovery map is a simple array of node numbers to recover.
 * It is protected by osb->osb_lock.
 */

struct ocfs2_recovery_map {
	unsigned int rm_used;
	unsigned int *rm_entries;
};

int ocfs2_recovery_init(struct ocfs2_super *osb)
{
	struct ocfs2_recovery_map *rm;

	mutex_init(&osb->recovery_lock);
	osb->disable_recovery = 0;
	osb->recovery_thread_task = NULL;
	init_waitqueue_head(&osb->recovery_event);
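
	/* A single allocation holds both the struct and its trailing
	 * rm_entries[] array; rm_entries is pointed just past the
	 * struct header below. */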
	rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
		     osb->max_slots * sizeof(unsigned int),
		     GFP_KERNEL);
	if (!rm)
		return -ENOMEM;

	rm->rm_entries = (unsigned int *)((char *)rm +
					  sizeof(struct ocfs2_recovery_map));
	osb->recovery_map = rm;

	return 0;
}

/* we can't grab the goofy sem lock from inside wait_event, so we use
 * memory barriers to make sure that we'll see the null task before
 * being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
	mb();
	return osb->recovery_thread_task != NULL;
}

void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
	struct ocfs2_recovery_map *rm;

	/* disable any new recovery threads and wait for any currently
	 * running ones to exit. Do this before setting the vol_state. */
	mutex_lock(&osb->recovery_lock);
	osb->disable_recovery = 1;
	mutex_unlock(&osb->recovery_lock);
	wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));

	/* At this point, we know that no more recovery threads can be
	 * launched, so wait for any recovery completion work to
	 * complete. */
	flush_workqueue(ocfs2_wq);

	/*
	 * Now that recovery is shut down, and the osb is about to be
	 * freed, the osb_lock is not taken here.
	 */
	rm = osb->recovery_map;
	/* XXX: Should we bug if there are dirty entries? */

	kfree(rm);
}

static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
				     unsigned int node_num)
{
	int i;
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	assert_spin_locked(&osb->osb_lock);

	for (i = 0; i < rm->rm_used; i++) {
		if (rm->rm_entries[i] == node_num)
			return 1;
	}

	return 0;
}

/* Behaves like test-and-set. Returns the previous value */
static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
				  unsigned int node_num)
{
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	spin_lock(&osb->osb_lock);
	if (__ocfs2_recovery_map_test(osb, node_num)) {
		spin_unlock(&osb->osb_lock);
		return 1;
	}

	/* XXX: Can this be exploited? Not from o2dlm... */
	BUG_ON(rm->rm_used >= osb->max_slots);

	rm->rm_entries[rm->rm_used] = node_num;
	rm->rm_used++;
	spin_unlock(&osb->osb_lock);

	return 0;
}

static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
				     unsigned int node_num)
{
	int i;
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	spin_lock(&osb->osb_lock);

	for (i = 0; i < rm->rm_used; i++) {
		if (rm->rm_entries[i] == node_num)
			break;
	}

	if (i < rm->rm_used) {
		/* XXX: be careful with the pointer math */
		memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
			(rm->rm_used - i - 1) * sizeof(unsigned int));
		rm->rm_used--;
	}

	spin_unlock(&osb->osb_lock);
}

static int ocfs2_commit_cache(struct ocfs2_super *osb)
{
	int status = 0;
	unsigned int flushed;
	unsigned long old_id;
	struct ocfs2_journal *journal = NULL;

	journal = osb->journal;

	/* Flush all pending commits and checkpoint the journal. */
	down_write(&journal->j_trans_barrier);

	if (atomic_read(&journal->j_num_trans) == 0) {
		up_write(&journal->j_trans_barrier);
		mlog(0, "No transactions for me to flush!\n");
		goto finally;
	}

	jbd2_journal_lock_updates(journal->j_journal);
	status = jbd2_journal_flush(journal->j_journal);
	jbd2_journal_unlock_updates(journal->j_journal);
	if (status < 0) {
		up_write(&journal->j_trans_barrier);
		mlog_errno(status);
		goto finally;
	}

	old_id = ocfs2_inc_trans_id(journal);

	flushed = atomic_read(&journal->j_num_trans);
	atomic_set(&journal->j_num_trans, 0);
	up_write(&journal->j_trans_barrier);

	mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
	     journal->j_trans_id, flushed);

	ocfs2_wake_downconvert_thread(osb);
	wake_up(&journal->j_checkpointed);

finally:
	return status;
}

/* Returns a new transaction handle, or an ERR_PTR on failure. Nothing
 * needs to be freed by the caller on error. */
handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
{
	journal_t *journal = osb->journal->j_journal;
	handle_t *handle;

	BUG_ON(!osb || !osb->journal->j_journal);

	if (ocfs2_is_hard_readonly(osb))
		return ERR_PTR(-EROFS);

	BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
	BUG_ON(max_buffs <= 0);

	/* Nested transaction? Just return the handle... */
	if (journal_current_handle())
		return jbd2_journal_start(journal, max_buffs);

	down_read(&osb->journal->j_trans_barrier);

	handle = jbd2_journal_start(journal, max_buffs);
	if (IS_ERR(handle)) {
		up_read(&osb->journal->j_trans_barrier);

		mlog_errno(PTR_ERR(handle));

		if (is_journal_aborted(journal)) {
			ocfs2_abort(osb->sb, "Detected aborted journal");
			handle = ERR_PTR(-EROFS);
		}
	} else if (!ocfs2_mount_local(osb)) {
		atomic_inc(&(osb->journal->j_num_trans));
	}

	return handle;
}

int ocfs2_commit_trans(struct ocfs2_super *osb,
		       handle_t *handle)
{
	int ret;
	int nested;
	struct ocfs2_journal *journal = osb->journal;

	BUG_ON(!handle);
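
	/* jbd2 refcounts re-entrant handles via h_ref; only the outermost
	 * stop may drop j_trans_barrier taken in ocfs2_start_trans(). */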
	nested = handle->h_ref > 1;
	ret = jbd2_journal_stop(handle);
	if (ret < 0)
		mlog_errno(ret);

	if (!nested)
		up_read(&journal->j_trans_barrier);

	return ret;
}
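
/*
 * Illustrative sketch only (not part of the original file): the typical
 * caller pattern for the transaction API above. The credit count, inode
 * and buffer_head here are placeholders for the example.
 */
static inline int ocfs2_example_update_block(struct ocfs2_super *osb,
					     struct inode *inode,
					     struct buffer_head *bh)
{
	handle_t *handle;
	int status;

	handle = ocfs2_start_trans(osb, 1);	/* one buffer's worth of credits */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0)
		goto out_commit;

	/* ... modify bh->b_data under the handle here ... */

	status = ocfs2_journal_dirty(handle, bh);

out_commit:
	/* Always commit, even on error: this drops j_trans_barrier. */
	ocfs2_commit_trans(osb, handle);
	return status;
}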

/*
 * 'nblocks' is what you want to add to the current
 * transaction. extend_trans will either extend the current handle by
 * nblocks, or commit it and start a new one with nblocks credits.
 *
 * This might call jbd2_journal_restart() which will commit dirty buffers
 * and then restart the transaction. Before calling
 * ocfs2_extend_trans(), any changed blocks should have been
 * dirtied. After calling it, all blocks which need to be changed must
 * go through another set of journal_access/journal_dirty calls.
 *
 * WARNING: This will not release any semaphores or disk locks taken
 * during the transaction, so make sure they were taken *before*
 * start_trans or we'll have ordering deadlocks.
 *
 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
 * good because transaction ids haven't yet been recorded on the
 * cluster locks associated with this handle.
 */
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
	int status;

	BUG_ON(!handle);
	BUG_ON(!nblocks);

	mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);

#ifdef CONFIG_OCFS2_DEBUG_FS
	status = 1;
#else
	status = jbd2_journal_extend(handle, nblocks);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
#endif

	if (status > 0) {
		mlog(0, "jbd2_journal_extend failed, trying "
		     "jbd2_journal_restart\n");
		status = jbd2_journal_restart(handle, nblocks);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	return status;
}
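
/*
 * Illustrative sketch only (not part of the original file): per the
 * warning above, after ocfs2_extend_trans() any block that still needs
 * changing must be re-run through journal_access/journal_dirty, since a
 * restart may have committed the handle's previous state.
 */
static inline int ocfs2_example_extend_and_redirty(handle_t *handle,
						   struct inode *inode,
						   struct buffer_head *bh)
{
	int status;

	status = ocfs2_extend_trans(handle, 8);	/* 8 more credits (example) */
	if (status < 0)
		return status;

	/* re-expose the buffer to the (possibly new) transaction */
	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0)
		return status;

	/* ... further modifications to bh->b_data ... */

	return ocfs2_journal_dirty(handle, bh);
}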

int ocfs2_journal_access(handle_t *handle,
			 struct inode *inode,
			 struct buffer_head *bh,
			 int type)
{
	int status;

	BUG_ON(!inode);
	BUG_ON(!handle);
	BUG_ON(!bh);

	mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
		   (unsigned long long)bh->b_blocknr, type,
		   (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
		   "OCFS2_JOURNAL_ACCESS_CREATE" :
		   "OCFS2_JOURNAL_ACCESS_WRITE",
		   bh->b_size);

	/* we can safely remove this assertion after testing. */
	if (!buffer_uptodate(bh)) {
		mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
		mlog(ML_ERROR, "b_blocknr=%llu\n",
		     (unsigned long long)bh->b_blocknr);
		BUG();
	}

	/* Set the current transaction information on the inode so
	 * that the locking code knows whether it can drop its locks
	 * on this inode or not. We're protected from the commit
	 * thread updating the current transaction id until
	 * ocfs2_commit_trans() because ocfs2_start_trans() took
	 * j_trans_barrier for us. */
	ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode);

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
	switch (type) {
	case OCFS2_JOURNAL_ACCESS_CREATE:
	case OCFS2_JOURNAL_ACCESS_WRITE:
		status = jbd2_journal_get_write_access(handle, bh);
		break;

	case OCFS2_JOURNAL_ACCESS_UNDO:
		status = jbd2_journal_get_undo_access(handle, bh);
		break;

	default:
		status = -EINVAL;
		mlog(ML_ERROR, "Unknown access type!\n");
	}
	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

	if (status < 0)
		mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
		     status, type);

	return status;
}

int ocfs2_journal_dirty(handle_t *handle,
			struct buffer_head *bh)
{
	int status;

	mlog_entry("(bh->b_blocknr=%llu)\n",
		   (unsigned long long)bh->b_blocknr);

	status = jbd2_journal_dirty_metadata(handle, bh);
	if (status < 0)
		mlog(ML_ERROR, "Could not dirty metadata buffer. "
		     "(bh->b_blocknr=%llu)\n",
		     (unsigned long long)bh->b_blocknr);

	return status;
}

#define OCFS2_DEFAULT_COMMIT_INTERVAL	(HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)

void ocfs2_set_journal_params(struct ocfs2_super *osb)
{
	journal_t *journal = osb->journal->j_journal;
	unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;

	if (osb->osb_commit_interval)
		commit_interval = osb->osb_commit_interval;

	spin_lock(&journal->j_state_lock);
	journal->j_commit_interval = commit_interval;
	if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	spin_unlock(&journal->j_state_lock);
}

int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
{
	int status = -1;
	struct inode *inode = NULL; /* the journal inode */
	journal_t *j_journal = NULL;
	struct ocfs2_dinode *di = NULL;
	struct buffer_head *bh = NULL;
	struct ocfs2_super *osb;
	int inode_lock = 0;

	BUG_ON(!journal);

	osb = journal->j_osb;

	/* already have the inode for our journal */
	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    osb->slot_num);
	if (inode == NULL) {
		status = -EACCES;
		mlog_errno(status);
		goto done;
	}
	if (is_bad_inode(inode)) {
		mlog(ML_ERROR, "access error (bad inode)\n");
		iput(inode);
		inode = NULL;
		status = -EACCES;
		goto done;
	}

	SET_INODE_JOURNAL(inode);
	OCFS2_I(inode)->ip_open_count++;

	/* Skip recovery waits here - journal inode metadata never
	 * changes in a live cluster so it can be considered an
	 * exception to the rule. */
	status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
	if (status < 0) {
		if (status != -ERESTARTSYS)
			mlog(ML_ERROR, "Could not get lock on journal!\n");
		goto done;
	}

	inode_lock = 1;
	di = (struct ocfs2_dinode *)bh->b_data;

	if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) {
		mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
		     inode->i_size);
		status = -EINVAL;
		goto done;
	}

	mlog(0, "inode->i_size = %lld\n", inode->i_size);
	mlog(0, "inode->i_blocks = %llu\n",
	     (unsigned long long)inode->i_blocks);
	mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);

	/* call the kernel's journal init function now */
	j_journal = jbd2_journal_init_inode(inode);
	if (j_journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
		status = -EINVAL;
		goto done;
	}

	mlog(0, "Returned from jbd2_journal_init_inode\n");
	mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);

	*dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
		  OCFS2_JOURNAL_DIRTY_FL);

	journal->j_journal = j_journal;
	journal->j_inode = inode;
	journal->j_bh = bh;

	ocfs2_set_journal_params(osb);

	journal->j_state = OCFS2_JOURNAL_LOADED;

	status = 0;
done:
	if (status < 0) {
		if (inode_lock)
			ocfs2_inode_unlock(inode, 1);
		brelse(bh);
		if (inode) {
			OCFS2_I(inode)->ip_open_count--;
			iput(inode);
		}
	}

	return status;
}

static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
{
	le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
}

static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
{
	return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
}

static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
				      int dirty, int replayed)
{
	int status;
	unsigned int flags;
	struct ocfs2_journal *journal = osb->journal;
	struct buffer_head *bh = journal->j_bh;
	struct ocfs2_dinode *fe;

	fe = (struct ocfs2_dinode *)bh->b_data;

	/* The journal bh on the osb always comes from ocfs2_journal_init()
	 * and was validated there inside ocfs2_inode_lock_full(). It's a
	 * code bug if we mess it up. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	if (dirty)
		flags |= OCFS2_JOURNAL_DIRTY_FL;
	else
		flags &= ~OCFS2_JOURNAL_DIRTY_FL;
	fe->id1.journal1.ij_flags = cpu_to_le32(flags);

	if (replayed)
		ocfs2_bump_recovery_generation(fe);

	status = ocfs2_write_block(osb, bh, journal->j_inode);
	if (status < 0)
		mlog_errno(status);

	return status;
}

/*
 * If the journal has been kmalloc'd it needs to be freed after this
 * call.
 */
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_journal *journal = NULL;
	struct inode *inode = NULL;
	int num_running_trans = 0;

	BUG_ON(!osb);

	journal = osb->journal;
	if (!journal)
		goto done;

	inode = journal->j_inode;

	if (journal->j_state != OCFS2_JOURNAL_LOADED)
		goto done;

	/* need to inc inode use count - jbd2_journal_destroy will iput. */
	if (!igrab(inode))
		BUG();

	num_running_trans = atomic_read(&(osb->journal->j_num_trans));
	if (num_running_trans > 0)
		mlog(0, "Shutting down journal: must wait on %d "
		     "running transactions!\n",
		     num_running_trans);

	/* Do a commit_cache here. It will flush our journal, *and*
	 * release any locks that are still held.
	 * Set the SHUTDOWN flag and release the trans lock.
	 * The commit thread will take the trans lock for us below. */
	journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;

	/* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
	 * drop the trans_lock (which we want to hold until we
	 * completely destroy the journal). */
	if (osb->commit_task) {
		/* Wait for the commit thread */
		mlog(0, "Waiting for ocfs2commit to exit....\n");
		kthread_stop(osb->commit_task);
		osb->commit_task = NULL;
	}

	BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);

	if (ocfs2_mount_local(osb)) {
		jbd2_journal_lock_updates(journal->j_journal);
		status = jbd2_journal_flush(journal->j_journal);
		jbd2_journal_unlock_updates(journal->j_journal);
		if (status < 0)
			mlog_errno(status);
	}

	if (status == 0) {
		/*
		 * Do not toggle if the flush was unsuccessful, otherwise
		 * we will leave dirty metadata in a "clean" journal
		 */
		status = ocfs2_journal_toggle_dirty(osb, 0, 0);
		if (status < 0)
			mlog_errno(status);
	}

	/* Shut down the kernel journal system */
	jbd2_journal_destroy(journal->j_journal);
	journal->j_journal = NULL;

	OCFS2_I(inode)->ip_open_count--;

	/* unlock our journal */
	ocfs2_inode_unlock(inode, 1);

	brelse(journal->j_bh);
	journal->j_bh = NULL;

	journal->j_state = OCFS2_JOURNAL_FREE;

//	up_write(&journal->j_trans_barrier);
done:
	if (inode)
		iput(inode);
}

static void ocfs2_clear_journal_error(struct super_block *sb,
				      journal_t *journal,
				      int slot)
{
	int olderr;

	olderr = jbd2_journal_errno(journal);
	if (olderr) {
		mlog(ML_ERROR, "File system error %d recorded in "
		     "journal %u.\n", olderr, slot);
		mlog(ML_ERROR, "File system on device %s needs checking.\n",
		     sb->s_id);

		jbd2_journal_ack_err(journal);
		jbd2_journal_clear_err(journal);
	}
}

int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
{
	int status = 0;
	struct ocfs2_super *osb;

	BUG_ON(!journal);

	osb = journal->j_osb;

	status = jbd2_journal_load(journal->j_journal);
	if (status < 0) {
		mlog(ML_ERROR, "Failed to load journal!\n");
		goto done;
	}

	ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

	status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	/* Launch the commit thread */
	if (!local) {
		osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
					       "ocfs2cmt");
		if (IS_ERR(osb->commit_task)) {
			status = PTR_ERR(osb->commit_task);
			osb->commit_task = NULL;
			mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
			     "error=%d", status);
			goto done;
		}
	} else
		osb->commit_task = NULL;

done:
	return status;
}

/* 'full' flag tells us whether we clear out all blocks or if we just
 * mark the journal clean */
int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
	int status;

	BUG_ON(!journal);

	status = jbd2_journal_wipe(journal->j_journal, full);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
	if (status < 0)
		mlog_errno(status);

bail:
	return status;
}

static int ocfs2_recovery_completed(struct ocfs2_super *osb)
{
	int empty;
	struct ocfs2_recovery_map *rm = osb->recovery_map;

	spin_lock(&osb->osb_lock);
	empty = (rm->rm_used == 0);
	spin_unlock(&osb->osb_lock);

	return empty;
}

void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
{
	wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
}

/*
 * JBD might read a cached version of another node's journal file. We
 * don't want this as this file changes often and we get no
 * notification on those changes. The only way to be sure that we've
 * got the most up to date version of those blocks then is to force
 * read them off disk. Just searching through the buffer cache won't
 * work as there may be pages backing this file which are still marked
 * up to date. We know things can't change on this file underneath us
 * as we have the lock by now :)
 */
static int ocfs2_force_read_journal(struct inode *inode)
{
	int status = 0;
	int i;
	u64 v_blkno, p_blkno, p_blocks, num_blocks;
#define CONCURRENT_JOURNAL_FILL 32ULL
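	/* Upper bound on the number of journal blocks read per batch below. */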
	struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];

	memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);

	num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
	v_blkno = 0;
	while (v_blkno < num_blocks) {
		status = ocfs2_extent_map_get_blocks(inode, v_blkno,
						     &p_blkno, &p_blocks, NULL);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		if (p_blocks > CONCURRENT_JOURNAL_FILL)
			p_blocks = CONCURRENT_JOURNAL_FILL;

		/* We are reading journal data which should not
		 * be put in the uptodate cache */
		status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
						p_blkno, p_blocks, bhs);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		for (i = 0; i < p_blocks; i++) {
			brelse(bhs[i]);
			bhs[i] = NULL;
		}

		v_blkno += p_blocks;
	}

bail:
	for (i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
		brelse(bhs[i]);
	return status;
}

struct ocfs2_la_recovery_item {
	struct list_head	lri_list;
	int			lri_slot;
	struct ocfs2_dinode	*lri_la_dinode;
	struct ocfs2_dinode	*lri_tl_dinode;
	struct ocfs2_quota_recovery *lri_qrec;
};

/* Does the second half of the recovery process. By this point, the
 * node is marked clean and can actually be considered recovered,
 * hence it's no longer in the recovery map, but there's still some
 * cleanup we can do which shouldn't happen within the recovery thread
 * as locking in that context becomes very difficult if we are to take
 * recovering nodes into account.
 *
 * NOTE: This function can and will sleep on recovery of other nodes
 * during cluster locking, just like any other ocfs2 process. */
void ocfs2_complete_recovery(struct work_struct *work)
{
	int ret;
	struct ocfs2_journal *journal =
		container_of(work, struct ocfs2_journal, j_recovery_work);
	struct ocfs2_super *osb = journal->j_osb;
	struct ocfs2_dinode *la_dinode, *tl_dinode;
	struct ocfs2_la_recovery_item *item, *n;
	struct ocfs2_quota_recovery *qrec;
	LIST_HEAD(tmp_la_list);

	mlog(0, "completing recovery from keventd\n");

	spin_lock(&journal->j_lock);
	list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
	spin_unlock(&journal->j_lock);

	list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
		list_del_init(&item->lri_list);

		mlog(0, "Complete recovery for slot %d\n", item->lri_slot);

		la_dinode = item->lri_la_dinode;
		if (la_dinode) {
			mlog(0, "Clean up local alloc %llu\n",
			     (unsigned long long)le64_to_cpu(la_dinode->i_blkno));

			ret = ocfs2_complete_local_alloc_recovery(osb,
								  la_dinode);
			if (ret < 0)
				mlog_errno(ret);

			kfree(la_dinode);
		}

		tl_dinode = item->lri_tl_dinode;
		if (tl_dinode) {
			mlog(0, "Clean up truncate log %llu\n",
			     (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));

			ret = ocfs2_complete_truncate_log_recovery(osb,
								   tl_dinode);
			if (ret < 0)
				mlog_errno(ret);

			kfree(tl_dinode);
		}

		ret = ocfs2_recover_orphans(osb, item->lri_slot);
		if (ret < 0)
			mlog_errno(ret);

		qrec = item->lri_qrec;
		if (qrec) {
			mlog(0, "Recovering quota files");
			ret = ocfs2_finish_quota_recovery(osb, qrec,
							  item->lri_slot);
			if (ret < 0)
				mlog_errno(ret);
			/* Recovery info is already freed now */
		}

		kfree(item);
	}

	mlog(0, "Recovery completion\n");
}

/* NOTE: This function always eats your references to la_dinode and
 * tl_dinode, either manually on error, or by passing them to
 * ocfs2_complete_recovery */
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
					    int slot_num,
					    struct ocfs2_dinode *la_dinode,
					    struct ocfs2_dinode *tl_dinode,
					    struct ocfs2_quota_recovery *qrec)
{
	struct ocfs2_la_recovery_item *item;

	item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
	if (!item) {
		/* Though we wish to avoid it, we are in fact safe in
		 * skipping local alloc cleanup as fsck.ocfs2 is more
		 * than capable of reclaiming unused space. */
		kfree(la_dinode);
		kfree(tl_dinode);

		if (qrec)
			ocfs2_free_quota_recovery(qrec);

		mlog_errno(-ENOMEM);
		return;
	}

	INIT_LIST_HEAD(&item->lri_list);
	item->lri_la_dinode = la_dinode;
	item->lri_slot = slot_num;
	item->lri_tl_dinode = tl_dinode;
	item->lri_qrec = qrec;

	spin_lock(&journal->j_lock);
	list_add_tail(&item->lri_list, &journal->j_la_cleanups);
	queue_work(ocfs2_wq, &journal->j_recovery_work);
	spin_unlock(&journal->j_lock);
}

/* Called by the mount code to queue the last part of recovery for its
 * own slot. */
void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
{
	struct ocfs2_journal *journal = osb->journal;

	if (osb->dirty) {
		/* No need to queue up our truncate_log as regular
		 * cleanup will catch that. */
		ocfs2_queue_recovery_completion(journal,
						osb->slot_num,
						osb->local_alloc_copy,
						NULL,
						NULL);
		ocfs2_schedule_truncate_log_flush(osb, 0);

		osb->local_alloc_copy = NULL;
		osb->dirty = 0;
	}
}

void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
{
	if (osb->quota_rec) {
		ocfs2_queue_recovery_completion(osb->journal,
						osb->slot_num,
						NULL,
						NULL,
						osb->quota_rec);
		osb->quota_rec = NULL;
	}
}

static int __ocfs2_recovery_thread(void *arg)
{
	int status, node_num, slot_num;
	struct ocfs2_super *osb = arg;
	struct ocfs2_recovery_map *rm = osb->recovery_map;
	int *rm_quota = NULL;
	int rm_quota_used = 0, i;
	struct ocfs2_quota_recovery *qrec;

	status = ocfs2_wait_on_mount(osb);
	if (status < 0)
		goto bail;

	rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
	if (!rm_quota) {
		status = -ENOMEM;
		goto bail;
	}
restart:
	status = ocfs2_super_lock(osb, 1);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	spin_lock(&osb->osb_lock);
	while (rm->rm_used) {
		/* It's always safe to remove entry zero, as we won't
		 * clear it until ocfs2_recover_node() has succeeded. */
		node_num = rm->rm_entries[0];
		spin_unlock(&osb->osb_lock);
		mlog(0, "checking node %d\n", node_num);
		slot_num = ocfs2_node_num_to_slot(osb, node_num);
		if (slot_num == -ENOENT) {
			status = 0;
			mlog(0, "no slot for this node, so no recovery "
			     "required.\n");
			goto skip_recovery;
		}
		mlog(0, "node %d was using slot %d\n", node_num, slot_num);

		/* It is a bit subtle with quota recovery. We cannot do it
		 * immediately because we have to obtain cluster locks from
		 * quota files and we also don't want to just skip it because
		 * then quota usage would be out of sync until some node takes
		 * the slot. So we remember which nodes need quota recovery
		 * and when everything else is done, we recover quotas. */
		for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++)
			;
		if (i == rm_quota_used)
			rm_quota[rm_quota_used++] = slot_num;

		status = ocfs2_recover_node(osb, node_num, slot_num);
skip_recovery:
		if (!status) {
			ocfs2_recovery_map_clear(osb, node_num);
		} else {
			mlog(ML_ERROR,
			     "Error %d recovering node %d on device (%u,%u)!\n",
			     status, node_num,
			     MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
			mlog(ML_ERROR, "Volume requires unmount.\n");
		}

		spin_lock(&osb->osb_lock);
	}
	spin_unlock(&osb->osb_lock);
	mlog(0, "All nodes recovered\n");

	/* Refresh all journal recovery generations from disk */
	status = ocfs2_check_journals_nolocks(osb);
	status = (status == -EROFS) ? 0 : status;
	if (status < 0)
		mlog_errno(status);

	/* Now is the right time to recover quotas... We have to do this under
	 * the superblock lock so that no one can start using the slot (and
	 * crash) before we recover it */
	for (i = 0; i < rm_quota_used; i++) {
		qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
		if (IS_ERR(qrec)) {
			status = PTR_ERR(qrec);
			mlog_errno(status);
			continue;
		}
		ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
						NULL, NULL, qrec);
	}

	ocfs2_super_unlock(osb, 1);

	/* We always run recovery on our own orphan dir - the dead
	 * node(s) may have disallowed a previous inode delete. Re-processing
	 * is therefore required. */
	ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
					NULL, NULL);

bail:
	mutex_lock(&osb->recovery_lock);
	if (!status && !ocfs2_recovery_completed(osb)) {
		mutex_unlock(&osb->recovery_lock);
		goto restart;
	}

	osb->recovery_thread_task = NULL;
	mb(); /* sync with ocfs2_recovery_thread_running */
	wake_up(&osb->recovery_event);

	mutex_unlock(&osb->recovery_lock);

	kfree(rm_quota);

	/* no one is calling kthread_stop() for us so the kthread() api
	 * requires that we call do_exit(). And it isn't exported, but
	 * complete_and_exit() seems to be a minimal wrapper around it. */
	complete_and_exit(NULL, status);
	return status;
}

void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
	mlog_entry("(node_num=%d, osb->node_num = %d)\n",
		   node_num, osb->node_num);

	mutex_lock(&osb->recovery_lock);
	if (osb->disable_recovery)
		goto out;

	/* People waiting on recovery will wait on
	 * the recovery map to empty. */
	if (ocfs2_recovery_map_set(osb, node_num))
		mlog(0, "node %d already in recovery map.\n", node_num);

	mlog(0, "starting recovery thread...\n");

	if (osb->recovery_thread_task)
		goto out;

	osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
						"ocfs2rec");
	if (IS_ERR(osb->recovery_thread_task)) {
		mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
		osb->recovery_thread_task = NULL;
	}

out:
	mutex_unlock(&osb->recovery_lock);
	wake_up(&osb->recovery_event);
}

static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
				    int slot_num,
				    struct buffer_head **bh,
				    struct inode **ret_inode)
{
	int status = -EACCES;
	struct inode *inode = NULL;

	BUG_ON(slot_num >= osb->max_slots);

	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    slot_num);
	if (!inode || is_bad_inode(inode)) {
		mlog_errno(status);
		goto bail;
	}
	SET_INODE_JOURNAL(inode);

	status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = 0;

bail:
	if (inode) {
		if (status || !ret_inode)
			iput(inode);
		else
			*ret_inode = inode;
	}
	return status;
}

/* Does the actual journal replay and marks the journal inode as
 * clean. Will only replay if the journal inode is marked dirty. */
static int ocfs2_replay_journal(struct ocfs2_super *osb,
				int node_num,
				int slot_num)
{
	int status;
	int got_lock = 0;
	unsigned int flags;
	struct inode *inode = NULL;
	struct ocfs2_dinode *fe;
	journal_t *journal = NULL;
	struct buffer_head *bh = NULL;
	u32 slot_reco_gen;

	status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
	if (status) {
		mlog_errno(status);
		goto done;
	}

	fe = (struct ocfs2_dinode *)bh->b_data;
	slot_reco_gen = ocfs2_get_recovery_generation(fe);
	brelse(bh);
	bh = NULL;

	/*
	 * As the fs recovery is asynchronous, there is a small chance that
	 * another node mounted (and recovered) the slot before the recovery
	 * thread could get the lock. To handle that, we dirty read the journal
	 * inode for that slot to get the recovery generation. If it is
	 * different from what we expected, the slot has been recovered.
	 * If not, it needs recovery.
	 */
	if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
		mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
		     osb->slot_recovery_generations[slot_num], slot_reco_gen);
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		status = -EBUSY;
		goto done;
	}

	/* Continue with recovery as the journal has not yet been recovered */

	status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
	if (status < 0) {
		mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
		if (status != -ERESTARTSYS)
			mlog(ML_ERROR, "Could not lock journal!\n");
		goto done;
	}
	got_lock = 1;

	fe = (struct ocfs2_dinode *) bh->b_data;

	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	slot_reco_gen = ocfs2_get_recovery_generation(fe);

	if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
		mlog(0, "No recovery required for node %d\n", node_num);
		/* Refresh recovery generation for the slot */
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		goto done;
	}

	mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
	     node_num, slot_num,
	     MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));

	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);

	status = ocfs2_force_read_journal(inode);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	mlog(0, "calling journal_init_inode\n");
	journal = jbd2_journal_init_inode(inode);
	if (journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
		status = -EIO;
		goto done;
	}

	status = jbd2_journal_load(journal);
	if (status < 0) {
		mlog_errno(status);
		if (!igrab(inode))
			BUG();
		jbd2_journal_destroy(journal);
		goto done;
	}

	ocfs2_clear_journal_error(osb->sb, journal, slot_num);

	/* wipe the journal */
	mlog(0, "flushing the journal.\n");
	jbd2_journal_lock_updates(journal);
	status = jbd2_journal_flush(journal);
	jbd2_journal_unlock_updates(journal);
	if (status < 0)
		mlog_errno(status);

	/* This will mark the node clean */
	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	flags &= ~OCFS2_JOURNAL_DIRTY_FL;
	fe->id1.journal1.ij_flags = cpu_to_le32(flags);

	/* Increment recovery generation to indicate successful recovery */
	ocfs2_bump_recovery_generation(fe);
	osb->slot_recovery_generations[slot_num] =
					ocfs2_get_recovery_generation(fe);

	status = ocfs2_write_block(osb, bh, inode);
	if (status < 0)
		mlog_errno(status);

	if (!igrab(inode))
		BUG();

	jbd2_journal_destroy(journal);

done:
	/* drop the lock on this node's journal */
	if (got_lock)
		ocfs2_inode_unlock(inode, 1);

	if (inode)
		iput(inode);

	brelse(bh);

	return status;
}

/*
 * Do the most important parts of node recovery:
 *  - Replay its journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered. As a result, failure during the
 * second part of a node's recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
			      int node_num, int slot_num)
{
	int status = 0;
	struct ocfs2_dinode *la_copy = NULL;
	struct ocfs2_dinode *tl_copy = NULL;

	mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
		   node_num, slot_num, osb->node_num);

	/* Should not ever be called to recover ourselves -- in that
	 * case we should've called ocfs2_journal_load instead. */
	BUG_ON(osb->node_num == node_num);

	status = ocfs2_replay_journal(osb, node_num, slot_num);
	if (status < 0) {
		if (status == -EBUSY) {
			mlog(0, "Skipping recovery for slot %u (node %u) "
			     "as another node has recovered it\n", slot_num,
			     node_num);
			status = 0;
			goto done;
		}
		mlog_errno(status);
		goto done;
	}

	/* Stamp a clean local alloc file AFTER recovering the journal... */
	status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	/* An error from begin_truncate_log_recovery is not
	 * serious enough to warrant halting the rest of
	 * recovery. */
	status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
	if (status < 0)
		mlog_errno(status);

	/* Likewise, this would be a strange but ultimately not so
	 * harmful place to get an error... */
	status = ocfs2_clear_slot(osb, slot_num);
	if (status < 0)
		mlog_errno(status);

	/* This will kfree the memory pointed to by la_copy and tl_copy */
	ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
					tl_copy, NULL);

	status = 0;
done:
	return status;
}

/* Test node liveness by trylocking its journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
 * still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
				 int slot_num)
{
	int status, flags;
	struct inode *inode = NULL;

	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    slot_num);
	if (inode == NULL) {
		mlog(ML_ERROR, "access error\n");
		status = -EACCES;
		goto bail;
	}
	if (is_bad_inode(inode)) {
		mlog(ML_ERROR, "access error (bad inode)\n");
		iput(inode);
		inode = NULL;
		status = -EACCES;
		goto bail;
	}
	SET_INODE_JOURNAL(inode);

	flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
	status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto bail;
	}

	ocfs2_inode_unlock(inode, 1);
bail:
	if (inode)
		iput(inode);

	return status;
}

/* Call this underneath ocfs2_super_lock. It also assumes that the
 * slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
	unsigned int node_num;
	int status, i;
	u32 gen;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *di;

	/* This is called with the super block cluster lock, so we
	 * know that the slot map can't change underneath us. */

	for (i = 0; i < osb->max_slots; i++) {
		/* Read journal inode to get the recovery generation */
		status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
		di = (struct ocfs2_dinode *)bh->b_data;
		gen = ocfs2_get_recovery_generation(di);
		brelse(bh);
		bh = NULL;

		spin_lock(&osb->osb_lock);
		osb->slot_recovery_generations[i] = gen;

		mlog(0, "Slot %u recovery generation is %u\n", i,
		     osb->slot_recovery_generations[i]);

		if (i == osb->slot_num) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
		if (status == -ENOENT) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		if (__ocfs2_recovery_map_test(osb, node_num)) {
			spin_unlock(&osb->osb_lock);
			continue;
		}
		spin_unlock(&osb->osb_lock);

		/* Ok, we have a slot occupied by another node which
		 * is not in the recovery map. We trylock its journal
		 * file here to test if it's alive. */
		status = ocfs2_trylock_journal(osb, i);
		if (!status) {
			/* Since we're called from mount, we know that
			 * the recovery thread can't race us on
			 * setting / checking the recovery bits. */
			ocfs2_recovery_thread(osb, node_num);
		} else if ((status < 0) && (status != -EAGAIN)) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	return status;
}

struct ocfs2_orphan_filldir_priv {
	struct inode		*head;
	struct ocfs2_super	*osb;
};

static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
				loff_t pos, u64 ino, unsigned type)
{
	struct ocfs2_orphan_filldir_priv *p = priv;
	struct inode *iter;

	if (name_len == 1 && !strncmp(".", name, 1))
		return 0;
	if (name_len == 2 && !strncmp("..", name, 2))
		return 0;

	/* Skip bad inodes so that recovery can continue */
	iter = ocfs2_iget(p->osb, ino,
			  OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
	if (IS_ERR(iter))
		return 0;

	mlog(0, "queue orphan %llu\n",
	     (unsigned long long)OCFS2_I(iter)->ip_blkno);
	/* No locking is required for the next_orphan queue as there
	 * is only ever a single process doing orphan recovery. */
	OCFS2_I(iter)->ip_next_orphan = p->head;
	p->head = iter;

	return 0;
}

static int ocfs2_queue_orphans(struct ocfs2_super *osb,
			       int slot,
			       struct inode **head)
{
	int status;
	struct inode *orphan_dir_inode = NULL;
	struct ocfs2_orphan_filldir_priv priv;
	loff_t pos = 0;

	priv.osb = osb;
	priv.head = *head;

	orphan_dir_inode = ocfs2_get_system_file_inode(osb,
						       ORPHAN_DIR_SYSTEM_INODE,
						       slot);
	if (!orphan_dir_inode) {
		status = -ENOENT;
		mlog_errno(status);
		return status;
	}

	mutex_lock(&orphan_dir_inode->i_mutex);
	status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
				   ocfs2_orphan_filldir);
	if (status) {
		mlog_errno(status);
		goto out_cluster;
	}

	*head = priv.head;

out_cluster:
	ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
	mutex_unlock(&orphan_dir_inode->i_mutex);
	iput(orphan_dir_inode);
	return status;
}

static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
					      int slot)
{
	int ret;

	spin_lock(&osb->osb_lock);
	ret = !osb->osb_orphan_wipes[slot];
	spin_unlock(&osb->osb_lock);
	return ret;
}

static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
					     int slot)
{
	spin_lock(&osb->osb_lock);
	/* Mark ourselves such that new processes in delete_inode()
	 * know to quit early. */
	ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
	while (osb->osb_orphan_wipes[slot]) {
		/* If any processes are already in the middle of an
		 * orphan wipe on this dir, then we need to wait for
		 * them. */
		spin_unlock(&osb->osb_lock);
		wait_event_interruptible(osb->osb_wipe_event,
					 ocfs2_orphan_recovery_can_continue(osb, slot));
		spin_lock(&osb->osb_lock);
	}
	spin_unlock(&osb->osb_lock);
}

static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
					      int slot)
{
	ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}

/*
 * Orphan recovery. Each mounted node has its own orphan dir which we
 * must run during recovery. Our strategy here is to build a list of
 * the inodes in the orphan dir and iget/iput them. The VFS does
 * (most) of the rest of the work.
 *
 * Orphan recovery can happen at any time, not just mount so we have a
 * couple of extra considerations.
 *
 * - We grab as many inodes as we can under the orphan dir lock -
 *   doing iget() outside the orphan dir risks getting a reference on
 *   an invalid inode.
 * - We must be sure not to deadlock with other processes on the
 *   system wanting to run delete_inode(). This can happen when they go
 *   to lock the orphan dir and the orphan recovery process attempts to
 *   iget() inside the orphan dir lock. This can be avoided by
 *   advertising our state to ocfs2_delete_inode().
 */
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
				 int slot)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct inode *iter;
	struct ocfs2_inode_info *oi;

	mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);

	ocfs2_mark_recovering_orphan_dir(osb, slot);
	ret = ocfs2_queue_orphans(osb, slot, &inode);
	ocfs2_clear_recovering_orphan_dir(osb, slot);

	/* Error here should be noted, but we want to continue with as
	 * many queued inodes as we've got. */
	if (ret)
		mlog_errno(ret);

	while (inode) {
		oi = OCFS2_I(inode);
		mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);

		iter = oi->ip_next_orphan;

		spin_lock(&oi->ip_lock);
		/* The remote delete code may have set these on the
		 * assumption that the other node would wipe them
		 * successfully. If they are still in the node's
		 * orphan dir, we need to reset that state. */
		oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE);

		/* Set the proper information to get us going into
		 * ocfs2_delete_inode. */
		oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
		spin_unlock(&oi->ip_lock);

		iput(inode);

		inode = iter;
	}

	return ret;
}

static int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
	/* This check is good because ocfs2 will wait on our recovery
	 * thread before changing it to something other than MOUNTED
	 * or DISABLED. */
	wait_event(osb->osb_mount_event,
		   atomic_read(&osb->vol_state) == VOLUME_MOUNTED ||
		   atomic_read(&osb->vol_state) == VOLUME_DISABLED);

	/* If there's an error on mount, then we may never get to the
	 * MOUNTED flag, but this is set right before
	 * dismount_volume() so we can trust it. */
	if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
		mlog(0, "mount error, exiting!\n");
		return -EBUSY;
	}

	return 0;
}

static int ocfs2_commit_thread(void *arg)
{
	int status;
	struct ocfs2_super *osb = arg;
	struct ocfs2_journal *journal = osb->journal;

	/* we can trust j_num_trans here because _should_stop() is only set in
	 * shutdown and nobody other than ourselves should be able to start
	 * transactions. committing on shutdown might take a few iterations
	 * as final transactions put deleted inodes on the list */
	while (!(kthread_should_stop() &&
		 atomic_read(&journal->j_num_trans) == 0)) {

		wait_event_interruptible(osb->checkpoint_event,
					 atomic_read(&journal->j_num_trans)
					 || kthread_should_stop());

		status = ocfs2_commit_cache(osb);
		if (status < 0)
			mlog_errno(status);

		if (kthread_should_stop() &&
		    atomic_read(&journal->j_num_trans)) {
			mlog(ML_KTHREAD,
			     "commit_thread: %u transactions pending on "
			     "shutdown\n",
			     atomic_read(&journal->j_num_trans));
		}
	}

	return 0;
}

/* Reads all the journal inodes without taking any cluster locks. Used
 * for hard readonly access to determine whether any journal requires
 * recovery. Also used to refresh the recovery generation numbers after
 * a journal has been recovered by another node. */
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
	int ret = 0;
	unsigned int slot;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int journal_dirty = 0;

	for (slot = 0; slot < osb->max_slots; slot++) {
		ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		di = (struct ocfs2_dinode *) di_bh->b_data;

		osb->slot_recovery_generations[slot] =
			ocfs2_get_recovery_generation(di);

		if (le32_to_cpu(di->id1.journal1.ij_flags) &
		    OCFS2_JOURNAL_DIRTY_FL)
			journal_dirty = 1;

		brelse(di_bh);
		di_bh = NULL;
	}

out:
	if (journal_dirty)
		ret = -EROFS;
	return ret;
}