/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * journal.c
 *
 * Defines the functions of the journalling API.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>

#define MLOG_MASK_PREFIX ML_JOURNAL
#include <cluster/masklog.h>

#include "extent_map.h"
#include "heartbeat.h"
#include "localalloc.h"
#include "buffer_head_io.h"

DEFINE_SPINLOCK(trans_inc_lock);
static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
                              int node_num, int slot_num);
static int __ocfs2_recovery_thread(void *arg);
static int ocfs2_commit_cache(struct ocfs2_super *osb);
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed);
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
                                 int slot_num);
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                                 int slot);
static int ocfs2_commit_thread(void *arg);
static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 0);
}

static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
{
        return __ocfs2_wait_on_mount(osb, 1);
}
/*
 * The recovery map is a simple array of node numbers that still need
 * to be recovered.  The map itself is protected by the osb_lock; the
 * recovery_lock only gates starting and stopping recovery threads.
 */

struct ocfs2_recovery_map {
        unsigned int rm_used;
        unsigned int *rm_entries;
};
int ocfs2_recovery_init(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        mutex_init(&osb->recovery_lock);
        osb->disable_recovery = 0;
        osb->recovery_thread_task = NULL;
        init_waitqueue_head(&osb->recovery_event);

        /* One allocation holds both the map and its entry array, so a
         * single kfree() in ocfs2_recovery_exit() releases both. */
        rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
                     osb->max_slots * sizeof(unsigned int),
                     GFP_KERNEL);
        if (!rm) {
                mlog_errno(-ENOMEM);
                return -ENOMEM;
        }

        rm->rm_entries = (unsigned int *)((char *)rm +
                                          sizeof(struct ocfs2_recovery_map));
        osb->recovery_map = rm;

        return 0;
}
/* we can't grab the goofy sem lock from inside wait_event, so we use
 * memory barriers to make sure that we'll see the null task before
 * being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
        mb();
        return osb->recovery_thread_task != NULL;
}
void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
        struct ocfs2_recovery_map *rm;

        /* disable any new recovery threads and wait for any currently
         * running ones to exit. Do this before setting the vol_state. */
        mutex_lock(&osb->recovery_lock);
        osb->disable_recovery = 1;
        mutex_unlock(&osb->recovery_lock);
        wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));

        /* At this point, we know that no more recovery threads can be
         * launched, so wait for any recovery completion work to
         * complete. */
        flush_workqueue(ocfs2_wq);

        /*
         * Now that recovery is shut down, and the osb is about to be
         * freed, the osb_lock is not taken here.
         */
        rm = osb->recovery_map;
        /* XXX: Should we bug if there are dirty entries? */

        kfree(rm);
}
static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        assert_spin_locked(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        return 1;
        }

        return 0;
}
/* Behaves like test-and-set.  Returns the previous value */
static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
                                  unsigned int node_num)
{
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        if (__ocfs2_recovery_map_test(osb, node_num)) {
                spin_unlock(&osb->osb_lock);
                return 1;
        }

        /* XXX: Can this be exploited? Not from o2dlm... */
        BUG_ON(rm->rm_used >= osb->max_slots);

        rm->rm_entries[rm->rm_used] = node_num;
        rm->rm_used++;
        spin_unlock(&osb->osb_lock);

        return 0;
}
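/*
 * Usage sketch (see ocfs2_recovery_thread() below): the test-and-set
 * semantic lets callers detect a recovery that is already queued:
 *
 *	if (ocfs2_recovery_map_set(osb, node_num))
 *		mlog(0, "node %d already in recovery map.\n", node_num);
 */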
static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
                                     unsigned int node_num)
{
        int i;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);

        for (i = 0; i < rm->rm_used; i++) {
                if (rm->rm_entries[i] == node_num)
                        break;
        }

        if (i < rm->rm_used) {
                /* XXX: be careful with the pointer math */
                memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
                        (rm->rm_used - i - 1) * sizeof(unsigned int));
                rm->rm_used--;
        }

        spin_unlock(&osb->osb_lock);
}
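/*
 * Lifecycle sketch: an entry stays in the map from the moment a dead
 * node is noticed until its journal has been replayed, so waiters in
 * ocfs2_wait_for_recovery() block across the whole window:
 *
 *	node_num = rm->rm_entries[0];
 *	status = ocfs2_recover_node(osb, node_num, slot_num);
 *	if (!status)
 *		ocfs2_recovery_map_clear(osb, node_num);
 */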
static int ocfs2_commit_cache(struct ocfs2_super *osb)
{
        int status = 0;
        unsigned int flushed;
        unsigned long old_id;
        struct ocfs2_journal *journal = NULL;

        journal = osb->journal;

        /* Flush all pending commits and checkpoint the journal. */
        down_write(&journal->j_trans_barrier);

        if (atomic_read(&journal->j_num_trans) == 0) {
                up_write(&journal->j_trans_barrier);
                mlog(0, "No transactions for me to flush!\n");
                goto finally;
        }

        jbd2_journal_lock_updates(journal->j_journal);
        status = jbd2_journal_flush(journal->j_journal);
        jbd2_journal_unlock_updates(journal->j_journal);
        if (status < 0) {
                up_write(&journal->j_trans_barrier);
                mlog_errno(status);
                goto finally;
        }

        old_id = ocfs2_inc_trans_id(journal);

        flushed = atomic_read(&journal->j_num_trans);
        atomic_set(&journal->j_num_trans, 0);
        up_write(&journal->j_trans_barrier);

        mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
             journal->j_trans_id, flushed);

        ocfs2_wake_downconvert_thread(osb);
        wake_up(&journal->j_checkpointed);
finally:
        return status;
}
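/*
 * Caller sketch: ocfs2_commit_thread() below is the main consumer; it
 * simply waits for transactions to appear and flushes them:
 *
 *	wait_event_interruptible(osb->checkpoint_event,
 *				 atomic_read(&journal->j_num_trans)
 *				 || kthread_should_stop());
 *	status = ocfs2_commit_cache(osb);
 */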
/* Returns a handle on success and an ERR_PTR on failure. If a
 * transaction is already running on this thread, the existing handle
 * is returned with its reference count bumped. */
handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
{
        journal_t *journal = osb->journal->j_journal;
        handle_t *handle;

        BUG_ON(!osb || !osb->journal->j_journal);

        if (ocfs2_is_hard_readonly(osb))
                return ERR_PTR(-EROFS);

        BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
        BUG_ON(max_buffs <= 0);

        /* Nested transaction? Just return the handle... */
        if (journal_current_handle())
                return jbd2_journal_start(journal, max_buffs);

        down_read(&osb->journal->j_trans_barrier);

        handle = jbd2_journal_start(journal, max_buffs);
        if (IS_ERR(handle)) {
                up_read(&osb->journal->j_trans_barrier);

                mlog_errno(PTR_ERR(handle));

                if (is_journal_aborted(journal)) {
                        ocfs2_abort(osb->sb, "Detected aborted journal");
                        handle = ERR_PTR(-EROFS);
                }
        } else {
                if (!ocfs2_mount_local(osb))
                        atomic_inc(&(osb->journal->j_num_trans));
        }

        return handle;
}
int ocfs2_commit_trans(struct ocfs2_super *osb,
                       handle_t *handle)
{
        int ret, nested;
        struct ocfs2_journal *journal = osb->journal;

        BUG_ON(!handle);

        nested = handle->h_ref > 1;
        ret = jbd2_journal_stop(handle);
        if (ret < 0)
                mlog_errno(ret);

        if (!nested)
                up_read(&journal->j_trans_barrier);

        return ret;
}
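/*
 * Minimal caller sketch (illustrative only -- the elided update step
 * is a stand-in; OCFS2_INODE_UPDATE_CREDITS is the credit constant
 * from journal.h):
 *
 *	handle_t *handle;
 *
 *	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... journal_access / modify / journal_dirty the buffers ...
 *	ocfs2_commit_trans(osb, handle);
 */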
/*
 * 'nblocks' is what you want to add to the current
 * transaction. extend_trans will either extend the current handle by
 * nblocks, or commit it and start a new one with nblocks credits.
 *
 * This might call jbd2_journal_restart() which will commit dirty buffers
 * and then restart the transaction. Before calling
 * ocfs2_extend_trans(), any changed blocks should have been
 * dirtied. After calling it, all blocks which need to be changed must
 * go through another set of journal_access/journal_dirty calls.
 *
 * WARNING: This will not release any semaphores or disk locks taken
 * during the transaction, so make sure they were taken *before*
 * start_trans or we'll have ordering deadlocks.
 *
 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
 * good because transaction ids haven't yet been recorded on the
 * cluster locks associated with this handle.
 */
int ocfs2_extend_trans(handle_t *handle, int nblocks)
{
        int status;

        BUG_ON(!handle);
        BUG_ON(!nblocks);

        mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);

#ifdef CONFIG_OCFS2_DEBUG_FS
        status = 1;
#else
        status = jbd2_journal_extend(handle, nblocks);
        if (status < 0)
                mlog_errno(status);
#endif

        if (status > 0) {
                mlog(0,
                     "jbd2_journal_extend failed, trying "
                     "jbd2_journal_restart\n");
                status = jbd2_journal_restart(handle, nblocks);
        }

        return status;
}
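/*
 * Usage sketch of the dirty-then-extend rule described above
 * (illustrative; 'extra' is a stand-in credit count):
 *
 *	ocfs2_journal_dirty(handle, bh);
 *	status = ocfs2_extend_trans(handle, extra);
 *	if (status < 0)
 *		goto bail;
 *	status = ocfs2_journal_access(handle, inode, bh,
 *				      OCFS2_JOURNAL_ACCESS_WRITE);
 */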
int ocfs2_journal_access(handle_t *handle,
                         struct inode *inode,
                         struct buffer_head *bh,
                         int type)
{
        int status;

        BUG_ON(!inode);
        BUG_ON(!handle);
        BUG_ON(!bh);

        mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
                   (unsigned long long)bh->b_blocknr, type,
                   (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
                   "OCFS2_JOURNAL_ACCESS_CREATE" :
                   "OCFS2_JOURNAL_ACCESS_WRITE",
                   bh->b_size);

        /* we can safely remove this assertion after testing. */
        if (!buffer_uptodate(bh)) {
                mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
                mlog(ML_ERROR, "b_blocknr=%llu\n",
                     (unsigned long long)bh->b_blocknr);
                BUG();
        }

        /* Set the current transaction information on the inode so
         * that the locking code knows whether it can drop its locks
         * on this inode or not. We're protected from the commit
         * thread updating the current transaction id until
         * ocfs2_commit_trans() because ocfs2_start_trans() took
         * j_trans_barrier for us. */
        ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode);

        mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
        switch (type) {
        case OCFS2_JOURNAL_ACCESS_CREATE:
        case OCFS2_JOURNAL_ACCESS_WRITE:
                status = jbd2_journal_get_write_access(handle, bh);
                break;

        case OCFS2_JOURNAL_ACCESS_UNDO:
                status = jbd2_journal_get_undo_access(handle, bh);
                break;

        default:
                status = -EINVAL;
                mlog(ML_ERROR, "Unknown access type!\n");
        }
        mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

        if (status < 0)
                mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
                     status, type);

        return status;
}
int ocfs2_journal_dirty(handle_t *handle,
                        struct buffer_head *bh)
{
        int status;

        mlog_entry("(bh->b_blocknr=%llu)\n",
                   (unsigned long long)bh->b_blocknr);

        status = jbd2_journal_dirty_metadata(handle, bh);
        if (status < 0)
                mlog(ML_ERROR, "Could not dirty metadata buffer. "
                     "(bh->b_blocknr=%llu)\n",
                     (unsigned long long)bh->b_blocknr);

        return status;
}

#define OCFS2_DEFAULT_COMMIT_INTERVAL	(HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
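/* osb_commit_interval comes from the commit=<nrsec> mount option
 * (already converted to jiffies by the option parser); zero means the
 * option was not given and the JBD2 default above is used instead. */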
void ocfs2_set_journal_params(struct ocfs2_super *osb)
{
        journal_t *journal = osb->journal->j_journal;
        unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;

        if (osb->osb_commit_interval)
                commit_interval = osb->osb_commit_interval;

        spin_lock(&journal->j_state_lock);
        journal->j_commit_interval = commit_interval;
        if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
                journal->j_flags |= JBD2_BARRIER;
        else
                journal->j_flags &= ~JBD2_BARRIER;
        spin_unlock(&journal->j_state_lock);
}
int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
{
        int status = -1;
        struct inode *inode = NULL; /* the journal inode */
        journal_t *j_journal = NULL;
        struct ocfs2_dinode *di = NULL;
        struct buffer_head *bh = NULL;
        struct ocfs2_super *osb;
        int inode_lock = 0;

        BUG_ON(!journal);

        osb = journal->j_osb;

        /* already have the inode for our journal */
        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            osb->slot_num);
        if (inode == NULL) {
                status = -EACCES;
                mlog_errno(status);
                goto done;
        }
        if (is_bad_inode(inode)) {
                mlog(ML_ERROR, "access error (bad inode)\n");
                iput(inode);
                inode = NULL;
                status = -EACCES;
                goto done;
        }

        SET_INODE_JOURNAL(inode);
        OCFS2_I(inode)->ip_open_count++;

        /* Skip recovery waits here - journal inode metadata never
         * changes in a live cluster so it can be considered an
         * exception to the rule. */
        status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not get lock on journal!\n");
                goto done;
        }

        inode_lock = 1;
        di = (struct ocfs2_dinode *)bh->b_data;

        if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) {
                mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
                     inode->i_size);
                status = -EINVAL;
                goto done;
        }

        mlog(0, "inode->i_size = %lld\n", inode->i_size);
        mlog(0, "inode->i_blocks = %llu\n",
             (unsigned long long)inode->i_blocks);
        mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);

        /* call the kernel's journal init function now */
        j_journal = jbd2_journal_init_inode(inode);
        if (j_journal == NULL) {
                mlog(ML_ERROR, "Linux journal layer error\n");
                status = -EINVAL;
                goto done;
        }

        mlog(0, "Returned from jbd2_journal_init_inode\n");
        mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);

        *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
                  OCFS2_JOURNAL_DIRTY_FL);

        journal->j_journal = j_journal;
        journal->j_inode = inode;
        journal->j_bh = bh;

        ocfs2_set_journal_params(osb);

        journal->j_state = OCFS2_JOURNAL_LOADED;

        status = 0;
done:
        if (status < 0) {
                if (inode_lock)
                        ocfs2_inode_unlock(inode, 1);
                brelse(bh);
                if (inode) {
                        OCFS2_I(inode)->ip_open_count--;
                        iput(inode);
                }
        }

        return status;
}
static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
{
        le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
}

static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
{
        return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
}
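/*
 * Usage sketch (see ocfs2_replay_journal() below): a slot that another
 * node already replayed is detected by comparing a freshly read
 * generation against the cached one:
 *
 *	slot_reco_gen = ocfs2_get_recovery_generation(fe);
 *	if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
 *		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
 *		status = -EBUSY;
 *	}
 */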
static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty, int replayed)
{
        int status;
        unsigned int flags;
        struct ocfs2_journal *journal = osb->journal;
        struct buffer_head *bh = journal->j_bh;
        struct ocfs2_dinode *fe;

        fe = (struct ocfs2_dinode *)bh->b_data;

        /* The journal bh on the osb always comes from ocfs2_journal_init()
         * and was validated there inside ocfs2_inode_lock_full(). It's a
         * code bug if we mess it up. */
        BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        if (dirty)
                flags |= OCFS2_JOURNAL_DIRTY_FL;
        else
                flags &= ~OCFS2_JOURNAL_DIRTY_FL;
        fe->id1.journal1.ij_flags = cpu_to_le32(flags);

        if (replayed)
                ocfs2_bump_recovery_generation(fe);

        status = ocfs2_write_block(osb, bh, journal->j_inode);
        if (status < 0)
                mlog_errno(status);

        return status;
}
/*
 * If the journal has been kmalloc'd it needs to be freed after this
 * function returns.
 */
void ocfs2_journal_shutdown(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = NULL;
        int status = 0;
        struct inode *inode = NULL;
        int num_running_trans = 0;

        BUG_ON(!osb);

        journal = osb->journal;
        if (!journal)
                goto done;

        inode = journal->j_inode;

        if (journal->j_state != OCFS2_JOURNAL_LOADED)
                goto done;

        /* need to inc inode use count - jbd2_journal_destroy will iput. */
        if (!igrab(inode))
                BUG();

        num_running_trans = atomic_read(&(osb->journal->j_num_trans));
        if (num_running_trans > 0)
                mlog(0, "Shutting down journal: must wait on %d "
                     "running transactions!\n",
                     num_running_trans);

        /* Do a commit_cache here. It will flush our journal, *and*
         * release any locks that are still held.
         * set the SHUTDOWN flag and release the trans lock.
         * the commit thread will take the trans lock for us below. */
        journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;

        /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
         * drop the trans_lock (which we want to hold until we
         * completely destroy the journal). */
        if (osb->commit_task) {
                /* Wait for the commit thread */
                mlog(0, "Waiting for ocfs2commit to exit....\n");
                kthread_stop(osb->commit_task);
                osb->commit_task = NULL;
        }

        BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);

        if (ocfs2_mount_local(osb)) {
                jbd2_journal_lock_updates(journal->j_journal);
                status = jbd2_journal_flush(journal->j_journal);
                jbd2_journal_unlock_updates(journal->j_journal);
                if (status < 0)
                        mlog_errno(status);
        }

        if (status == 0) {
                /*
                 * Do not toggle if flush was unsuccessful otherwise
                 * will leave dirty metadata in a "clean" journal
                 */
                status = ocfs2_journal_toggle_dirty(osb, 0, 0);
                if (status < 0)
                        mlog_errno(status);
        }

        /* Shutdown the kernel journal system */
        jbd2_journal_destroy(journal->j_journal);
        journal->j_journal = NULL;

        OCFS2_I(inode)->ip_open_count--;

        /* unlock our journal */
        ocfs2_inode_unlock(inode, 1);

        brelse(journal->j_bh);
        journal->j_bh = NULL;

        journal->j_state = OCFS2_JOURNAL_FREE;

//	up_write(&journal->j_trans_barrier);
done:
        if (inode)
                iput(inode);
}
static void ocfs2_clear_journal_error(struct super_block *sb,
                                      journal_t *journal,
                                      int slot)
{
        int olderr;

        olderr = jbd2_journal_errno(journal);
        if (olderr) {
                mlog(ML_ERROR, "File system error %d recorded in "
                     "journal %u.\n", olderr, slot);
                mlog(ML_ERROR, "File system on device %s needs checking.\n",
                     sb->s_id);

                jbd2_journal_ack_err(journal);
                jbd2_journal_clear_err(journal);
        }
}
int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
{
        int status = 0;
        struct ocfs2_super *osb;

        BUG_ON(!journal);

        osb = journal->j_osb;

        status = jbd2_journal_load(journal->j_journal);
        if (status < 0) {
                mlog(ML_ERROR, "Failed to load journal!\n");
                goto done;
        }

        ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);

        status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        /* Launch the commit thread */
        if (!local) {
                osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
                                               "ocfs2cmt");
                if (IS_ERR(osb->commit_task)) {
                        status = PTR_ERR(osb->commit_task);
                        osb->commit_task = NULL;
                        mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
                             "error=%d", status);
                        goto done;
                }
        } else
                osb->commit_task = NULL;

done:
        return status;
}
/* 'full' flag tells us whether we clear out all blocks or if we just
 * mark the journal clean */
int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
{
        int status;

        BUG_ON(!journal);

        status = jbd2_journal_wipe(journal->j_journal, full);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
        if (status < 0)
                mlog_errno(status);

bail:
        return status;
}
static int ocfs2_recovery_completed(struct ocfs2_super *osb)
{
        int empty;
        struct ocfs2_recovery_map *rm = osb->recovery_map;

        spin_lock(&osb->osb_lock);
        empty = (rm->rm_used == 0);
        spin_unlock(&osb->osb_lock);

        return empty;
}

void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
{
        wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
}
/*
 * JBD might read a cached version of another node's journal file. We
 * don't want this as this file changes often and we get no
 * notification on those changes. The only way to be sure that we've
 * got the most up to date version of those blocks then is to force
 * read them off disk. Just searching through the buffer cache won't
 * work as there may be pages backing this file which are still marked
 * up to date. We know things can't change on this file underneath us
 * as we have the lock by now :)
 */
static int ocfs2_force_read_journal(struct inode *inode)
{
        int status = 0;
        int i;
        u64 v_blkno, p_blkno, p_blocks, num_blocks;
#define CONCURRENT_JOURNAL_FILL 32ULL
        struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];

        memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);

        num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
        v_blkno = 0;
        while (v_blkno < num_blocks) {
                status = ocfs2_extent_map_get_blocks(inode, v_blkno,
                                                     &p_blkno, &p_blocks, NULL);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                if (p_blocks > CONCURRENT_JOURNAL_FILL)
                        p_blocks = CONCURRENT_JOURNAL_FILL;

                /* We are reading journal data which should not
                 * be put in the uptodate cache */
                status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
                                                p_blkno, p_blocks, bhs);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                for (i = 0; i < p_blocks; i++) {
                        brelse(bhs[i]);
                        bhs[i] = NULL;
                }

                v_blkno += p_blocks;
        }

bail:
        for (i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
                brelse(bhs[i]);
        return status;
}
struct ocfs2_la_recovery_item {
        struct list_head        lri_list;
        int                     lri_slot;
        struct ocfs2_dinode     *lri_la_dinode;
        struct ocfs2_dinode     *lri_tl_dinode;
        struct ocfs2_quota_recovery *lri_qrec;
};
/* Does the second half of the recovery process. By this point, the
 * node is marked clean and can actually be considered recovered,
 * hence it's no longer in the recovery map, but there's still some
 * cleanup we can do which shouldn't happen within the recovery thread
 * as locking in that context becomes very difficult if we are to take
 * recovering nodes into account.
 *
 * NOTE: This function can and will sleep on recovery of other nodes
 * during cluster locking, just like any other ocfs2 process.
 */
void ocfs2_complete_recovery(struct work_struct *work)
{
        int ret;
        struct ocfs2_journal *journal =
                container_of(work, struct ocfs2_journal, j_recovery_work);
        struct ocfs2_super *osb = journal->j_osb;
        struct ocfs2_dinode *la_dinode, *tl_dinode;
        struct ocfs2_la_recovery_item *item, *n;
        struct ocfs2_quota_recovery *qrec;
        LIST_HEAD(tmp_la_list);

        mlog(0, "completing recovery from keventd\n");

        spin_lock(&journal->j_lock);
        list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
        spin_unlock(&journal->j_lock);

        list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
                list_del_init(&item->lri_list);

                mlog(0, "Complete recovery for slot %d\n", item->lri_slot);

                ocfs2_wait_on_quotas(osb);

                la_dinode = item->lri_la_dinode;
                if (la_dinode) {
                        mlog(0, "Clean up local alloc %llu\n",
                             (unsigned long long)le64_to_cpu(la_dinode->i_blkno));

                        ret = ocfs2_complete_local_alloc_recovery(osb,
                                                                  la_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(la_dinode);
                }

                tl_dinode = item->lri_tl_dinode;
                if (tl_dinode) {
                        mlog(0, "Clean up truncate log %llu\n",
                             (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));

                        ret = ocfs2_complete_truncate_log_recovery(osb,
                                                                   tl_dinode);
                        if (ret < 0)
                                mlog_errno(ret);

                        kfree(tl_dinode);
                }

                ret = ocfs2_recover_orphans(osb, item->lri_slot);
                if (ret < 0)
                        mlog_errno(ret);

                qrec = item->lri_qrec;
                if (qrec) {
                        mlog(0, "Recovering quota files\n");
                        ret = ocfs2_finish_quota_recovery(osb, qrec,
                                                          item->lri_slot);
                        if (ret < 0)
                                mlog_errno(ret);
                        /* Recovery info is already freed now */
                }

                kfree(item);
        }

        mlog(0, "Recovery completion\n");
}
/* NOTE: This function always eats your references to la_dinode and
 * tl_dinode, either manually on error, or by passing them to
 * ocfs2_complete_recovery */
static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
                                            int slot_num,
                                            struct ocfs2_dinode *la_dinode,
                                            struct ocfs2_dinode *tl_dinode,
                                            struct ocfs2_quota_recovery *qrec)
{
        struct ocfs2_la_recovery_item *item;

        item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
        if (!item) {
                /* Though we wish to avoid it, we are in fact safe in
                 * skipping local alloc cleanup as fsck.ocfs2 is more
                 * than capable of reclaiming unused space. */
                kfree(la_dinode);
                kfree(tl_dinode);

                if (qrec)
                        ocfs2_free_quota_recovery(qrec);

                mlog_errno(-ENOMEM);
                return;
        }

        INIT_LIST_HEAD(&item->lri_list);
        item->lri_la_dinode = la_dinode;
        item->lri_slot = slot_num;
        item->lri_tl_dinode = tl_dinode;
        item->lri_qrec = qrec;

        spin_lock(&journal->j_lock);
        list_add_tail(&item->lri_list, &journal->j_la_cleanups);
        queue_work(ocfs2_wq, &journal->j_recovery_work);
        spin_unlock(&journal->j_lock);
}
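/*
 * Call-site sketch (see ocfs2_recover_node() below): because the
 * function consumes the references unconditionally, callers simply
 * hand the copies off and forget them:
 *
 *	ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
 *					tl_copy, NULL);
 */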
/* Called by the mount code to queue up the last part of recovery for
 * its own slot. */
void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
{
        struct ocfs2_journal *journal = osb->journal;

        /* No need to queue up our truncate_log as regular
         * cleanup will catch that. */
        ocfs2_queue_recovery_completion(journal,
                                        osb->slot_num,
                                        osb->local_alloc_copy,
                                        NULL,
                                        NULL);
        ocfs2_schedule_truncate_log_flush(osb, 0);

        osb->local_alloc_copy = NULL;
        osb->dirty = 0;
}
void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
{
        if (osb->quota_rec) {
                ocfs2_queue_recovery_completion(osb->journal,
                                                osb->slot_num,
                                                NULL,
                                                NULL,
                                                osb->quota_rec);
                osb->quota_rec = NULL;
        }
}
static int __ocfs2_recovery_thread(void *arg)
{
        int status, node_num, slot_num;
        struct ocfs2_super *osb = arg;
        struct ocfs2_recovery_map *rm = osb->recovery_map;
        int *rm_quota = NULL;
        int rm_quota_used = 0, i;
        struct ocfs2_quota_recovery *qrec;

        status = ocfs2_wait_on_mount(osb);
        if (status < 0)
                goto bail;

        rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
        if (!rm_quota) {
                status = -ENOMEM;
                goto bail;
        }
restart:
        status = ocfs2_super_lock(osb, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        spin_lock(&osb->osb_lock);
        while (rm->rm_used) {
                /* It's always safe to remove entry zero, as we won't
                 * clear it until ocfs2_recover_node() has succeeded. */
                node_num = rm->rm_entries[0];
                spin_unlock(&osb->osb_lock);
                mlog(0, "checking node %d\n", node_num);
                slot_num = ocfs2_node_num_to_slot(osb, node_num);
                if (slot_num == -ENOENT) {
                        status = 0;
                        mlog(0, "no slot for this node, so no recovery "
                             "required.\n");
                        goto skip_recovery;
                }
                mlog(0, "node %d was using slot %d\n", node_num, slot_num);

                /* It is a bit subtle with quota recovery. We cannot do it
                 * immediately because we have to obtain cluster locks from
                 * quota files and we also don't want to just skip it because
                 * then quota usage would be out of sync until some node takes
                 * the slot. So we remember which nodes need quota recovery
                 * and when everything else is done, we recover quotas. */
                for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++)
                        ;
                if (i == rm_quota_used)
                        rm_quota[rm_quota_used++] = slot_num;

                status = ocfs2_recover_node(osb, node_num, slot_num);
skip_recovery:
                if (!status) {
                        ocfs2_recovery_map_clear(osb, node_num);
                } else {
                        mlog(ML_ERROR,
                             "Error %d recovering node %d on device (%u,%u)!\n",
                             status, node_num,
                             MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
                        mlog(ML_ERROR, "Volume requires unmount.\n");
                }

                spin_lock(&osb->osb_lock);
        }
        spin_unlock(&osb->osb_lock);
        mlog(0, "All nodes recovered\n");

        /* Refresh all journal recovery generations from disk */
        status = ocfs2_check_journals_nolocks(osb);
        status = (status == -EROFS) ? 0 : status;
        if (status < 0)
                mlog_errno(status);

        /* Now it is the right time to recover quotas... We have to do this
         * under the superblock lock so that no one can start using the slot
         * (and crash) before we recover it */
        for (i = 0; i < rm_quota_used; i++) {
                qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
                if (IS_ERR(qrec)) {
                        status = PTR_ERR(qrec);
                        mlog_errno(status);
                        continue;
                }
                ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
                                                NULL, NULL, qrec);
        }

        ocfs2_super_unlock(osb, 1);

        /* We always run recovery on our own orphan dir - the dead
         * node(s) may have disallowed a previous inode delete. Re-processing
         * is therefore required. */
        ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
                                        NULL, NULL);

bail:
        mutex_lock(&osb->recovery_lock);
        if (!status && !ocfs2_recovery_completed(osb)) {
                mutex_unlock(&osb->recovery_lock);
                goto restart;
        }

        osb->recovery_thread_task = NULL;
        mb(); /* sync with ocfs2_recovery_thread_running */
        wake_up(&osb->recovery_event);

        mutex_unlock(&osb->recovery_lock);

        kfree(rm_quota);

        /* no one is calling kthread_stop() for us so the kthread() api
         * requires that we call do_exit().  And it isn't exported, but
         * complete_and_exit() seems to be a minimal wrapper around it. */
        complete_and_exit(NULL, status);
        return status;
}
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
        mlog_entry("(node_num=%d, osb->node_num = %d)\n",
                   node_num, osb->node_num);

        mutex_lock(&osb->recovery_lock);
        if (osb->disable_recovery)
                goto out;

        /* People waiting on recovery will wait on
         * the recovery map to empty. */
        if (ocfs2_recovery_map_set(osb, node_num))
                mlog(0, "node %d already in recovery map.\n", node_num);

        mlog(0, "starting recovery thread...\n");

        if (osb->recovery_thread_task)
                goto out;

        osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
                                                "ocfs2rec");
        if (IS_ERR(osb->recovery_thread_task)) {
                mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
                osb->recovery_thread_task = NULL;
        }

out:
        mutex_unlock(&osb->recovery_lock);
        wake_up(&osb->recovery_event);
}
static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
                                    int slot_num,
                                    struct buffer_head **bh,
                                    struct inode **ret_inode)
{
        int status = -EACCES;
        struct inode *inode = NULL;

        BUG_ON(slot_num >= osb->max_slots);

        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            slot_num);
        if (!inode || is_bad_inode(inode)) {
                mlog_errno(status);
                goto bail;
        }
        SET_INODE_JOURNAL(inode);

        status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = 0;

bail:
        if (inode) {
                if (status || !ret_inode)
                        iput(inode);
                else
                        *ret_inode = inode;
        }

        return status;
}
/* Does the actual journal replay and marks the journal inode as
 * clean. Will only replay if the journal inode is marked dirty. */
static int ocfs2_replay_journal(struct ocfs2_super *osb,
                                int node_num,
                                int slot_num)
{
        int status;
        int got_lock = 0;
        unsigned int flags;
        struct inode *inode = NULL;
        struct ocfs2_dinode *fe;
        journal_t *journal = NULL;
        struct buffer_head *bh = NULL;
        u32 slot_reco_gen;

        status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
        if (status) {
                mlog_errno(status);
                goto done;
        }

        fe = (struct ocfs2_dinode *)bh->b_data;
        slot_reco_gen = ocfs2_get_recovery_generation(fe);
        brelse(bh);
        bh = NULL;

        /*
         * As the fs recovery is asynchronous, there is a small chance that
         * another node mounted (and recovered) the slot before the recovery
         * thread could get the lock. To handle that, we dirty read the journal
         * inode for that slot to get the recovery generation. If it is
         * different than what we expected, the slot has been recovered.
         * If not, it needs recovery.
         */
        if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
                mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
                     osb->slot_recovery_generations[slot_num], slot_reco_gen);
                osb->slot_recovery_generations[slot_num] = slot_reco_gen;
                status = -EBUSY;
                goto done;
        }

        /* Continue with recovery as the journal has not yet been recovered */

        status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not lock journal!\n");
                goto done;
        }
        got_lock = 1;

        fe = (struct ocfs2_dinode *)bh->b_data;

        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        slot_reco_gen = ocfs2_get_recovery_generation(fe);

        if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
                mlog(0, "No recovery required for node %d\n", node_num);
                /* Refresh recovery generation for the slot */
                osb->slot_recovery_generations[slot_num] = slot_reco_gen;
                goto done;
        }

        mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
             node_num, slot_num,
             MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));

        OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);

        status = ocfs2_force_read_journal(inode);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        mlog(0, "calling journal_init_inode\n");
        journal = jbd2_journal_init_inode(inode);
        if (journal == NULL) {
                mlog(ML_ERROR, "Linux journal layer error\n");
                status = -EIO;
                goto done;
        }

        status = jbd2_journal_load(journal);
        if (status < 0) {
                mlog_errno(status);
                if (!igrab(inode))
                        BUG();
                jbd2_journal_destroy(journal);
                goto done;
        }

        ocfs2_clear_journal_error(osb->sb, journal, slot_num);

        /* wipe the journal */
        mlog(0, "flushing the journal.\n");
        jbd2_journal_lock_updates(journal);
        status = jbd2_journal_flush(journal);
        jbd2_journal_unlock_updates(journal);
        if (status < 0)
                mlog_errno(status);

        /* This will mark the node clean */
        flags = le32_to_cpu(fe->id1.journal1.ij_flags);
        flags &= ~OCFS2_JOURNAL_DIRTY_FL;
        fe->id1.journal1.ij_flags = cpu_to_le32(flags);

        /* Increment recovery generation to indicate successful recovery */
        ocfs2_bump_recovery_generation(fe);
        osb->slot_recovery_generations[slot_num] =
                ocfs2_get_recovery_generation(fe);

        status = ocfs2_write_block(osb, bh, inode);
        if (status < 0)
                mlog_errno(status);

        if (!igrab(inode))
                BUG();

        jbd2_journal_destroy(journal);

done:
        /* drop the lock on this node's journal */
        if (got_lock)
                ocfs2_inode_unlock(inode, 1);

        if (inode)
                iput(inode);

        brelse(bh);

        return status;
}
/*
 * Do the most important parts of node recovery:
 *  - Replay its journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered. As a result, failure during the
 * second part of a node's recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
                              int node_num, int slot_num)
{
        int status = 0;
        struct ocfs2_dinode *la_copy = NULL;
        struct ocfs2_dinode *tl_copy = NULL;

        mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
                   node_num, slot_num, osb->node_num);

        /* Should not ever be called to recover ourselves -- in that
         * case we should've called ocfs2_journal_load instead. */
        BUG_ON(osb->node_num == node_num);

        status = ocfs2_replay_journal(osb, node_num, slot_num);
        if (status < 0) {
                if (status == -EBUSY) {
                        mlog(0, "Skipping recovery for slot %u (node %u) "
                             "as another node has recovered it\n", slot_num,
                             node_num);
                        status = 0;
                        goto done;
                }
                mlog_errno(status);
                goto done;
        }

        /* Stamp a clean local alloc file AFTER recovering the journal... */
        status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
        if (status < 0) {
                mlog_errno(status);
                goto done;
        }

        /* An error from begin_truncate_log_recovery is not
         * serious enough to warrant halting the rest of
         * recovery. */
        status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
        if (status < 0)
                mlog_errno(status);

        /* Likewise, this would be a strange but ultimately not so
         * harmful place to get an error... */
        status = ocfs2_clear_slot(osb, slot_num);
        if (status < 0)
                mlog_errno(status);

        /* This will kfree the memory pointed to by la_copy and tl_copy */
        ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
                                        tl_copy, NULL);

        status = 0;
done:
        return status;
}
/* Test node liveness by trylocking his journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if node is
 * still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
                                 int slot_num)
{
        int status, flags;
        struct inode *inode = NULL;

        inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
                                            slot_num);
        if (inode == NULL) {
                mlog(ML_ERROR, "access error\n");
                status = -EACCES;
                goto bail;
        }
        if (is_bad_inode(inode)) {
                mlog(ML_ERROR, "access error (bad inode)\n");
                iput(inode);
                inode = NULL;
                status = -EACCES;
                goto bail;
        }
        SET_INODE_JOURNAL(inode);

        flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
        status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
        if (status < 0) {
                if (status != -EAGAIN)
                        mlog_errno(status);
                goto bail;
        }

        ocfs2_inode_unlock(inode, 1);
bail:
        if (inode)
                iput(inode);

        return status;
}
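/*
 * Caller sketch (see ocfs2_mark_dead_nodes() below): a zero return
 * means we took and dropped the dead node's lock, so its slot needs
 * recovery; -EAGAIN means the owner is still alive:
 *
 *	status = ocfs2_trylock_journal(osb, i);
 *	if (!status)
 *		ocfs2_recovery_thread(osb, node_num);
 */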
/* Call this underneath ocfs2_super_lock. It also assumes that the
 * slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
        unsigned int node_num;
        int status, i;
        u32 gen;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *di;

        /* This is called with the super block cluster lock, so we
         * know that the slot map can't change underneath us. */

        for (i = 0; i < osb->max_slots; i++) {
                /* Read journal inode to get the recovery generation */
                status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
                if (status) {
                        mlog_errno(status);
                        goto bail;
                }
                di = (struct ocfs2_dinode *)bh->b_data;
                gen = ocfs2_get_recovery_generation(di);
                brelse(bh);
                bh = NULL;

                spin_lock(&osb->osb_lock);
                osb->slot_recovery_generations[i] = gen;

                mlog(0, "Slot %u recovery generation is %u\n", i,
                     osb->slot_recovery_generations[i]);

                if (i == osb->slot_num) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }

                status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
                if (status == -ENOENT) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }

                if (__ocfs2_recovery_map_test(osb, node_num)) {
                        spin_unlock(&osb->osb_lock);
                        continue;
                }
                spin_unlock(&osb->osb_lock);

                /* Ok, we have a slot occupied by another node which
                 * is not in the recovery map. We trylock his journal
                 * file here to test if he's alive. */
                status = ocfs2_trylock_journal(osb, i);
                if (!status) {
                        /* Since we're called from mount, we know that
                         * the recovery thread can't race us on
                         * setting / checking the recovery bits. */
                        ocfs2_recovery_thread(osb, node_num);
                } else if ((status < 0) && (status != -EAGAIN)) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = 0;
bail:
        return status;
}
struct ocfs2_orphan_filldir_priv {
        struct inode            *head;
        struct ocfs2_super      *osb;
};

static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
                                loff_t pos, u64 ino, unsigned type)
{
        struct ocfs2_orphan_filldir_priv *p = priv;
        struct inode *iter;

        if (name_len == 1 && !strncmp(".", name, 1))
                return 0;
        if (name_len == 2 && !strncmp("..", name, 2))
                return 0;

        /* Skip bad inodes so that recovery can continue */
        iter = ocfs2_iget(p->osb, ino,
                          OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
        if (IS_ERR(iter))
                return 0;

        mlog(0, "queue orphan %llu\n",
             (unsigned long long)OCFS2_I(iter)->ip_blkno);
        /* No locking is required for the next_orphan queue as there
         * is only ever a single process doing orphan recovery. */
        OCFS2_I(iter)->ip_next_orphan = p->head;
        p->head = iter;

        return 0;
}
static int ocfs2_queue_orphans(struct ocfs2_super *osb,
                               int slot,
                               struct inode **head)
{
        int status;
        struct inode *orphan_dir_inode = NULL;
        struct ocfs2_orphan_filldir_priv priv;
        loff_t pos = 0;

        priv.osb = osb;
        priv.head = *head;

        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
                                                       slot);
        if (!orphan_dir_inode) {
                status = -ENOENT;
                mlog_errno(status);
                return status;
        }

        mutex_lock(&orphan_dir_inode->i_mutex);
        status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
                                   ocfs2_orphan_filldir);
        if (status) {
                mlog_errno(status);
                goto out_cluster;
        }

        *head = priv.head;

out_cluster:
        ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
        mutex_unlock(&orphan_dir_inode->i_mutex);
        iput(orphan_dir_inode);
        return status;
}
static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
                                              int slot)
{
        int ret;

        spin_lock(&osb->osb_lock);
        ret = !osb->osb_orphan_wipes[slot];
        spin_unlock(&osb->osb_lock);
        return ret;
}

static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
                                             int slot)
{
        spin_lock(&osb->osb_lock);
        /* Mark ourselves such that new processes in delete_inode()
         * know to quit early. */
        ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
        while (osb->osb_orphan_wipes[slot]) {
                /* If any processes are already in the middle of an
                 * orphan wipe on this dir, then we need to wait for
                 * them. */
                spin_unlock(&osb->osb_lock);
                wait_event_interruptible(osb->osb_wipe_event,
                                         ocfs2_orphan_recovery_can_continue(osb,
                                                                            slot));
                spin_lock(&osb->osb_lock);
        }
        spin_unlock(&osb->osb_lock);
}

static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
                                              int slot)
{
        ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}
/*
 * Orphan recovery. Each mounted node has its own orphan dir which we
 * must run during recovery. Our strategy here is to build a list of
 * the inodes in the orphan dir and iget/iput them. The VFS does
 * (most) of the rest of the work.
 *
 * Orphan recovery can happen at any time, not just mount, so we have a
 * couple of extra considerations.
 *
 * - We grab as many inodes as we can under the orphan dir lock -
 *   doing iget() outside the orphan dir risks getting a reference on
 *   an invalid inode.
 * - We must be sure not to deadlock with other processes on the
 *   system wanting to run delete_inode(). This can happen when they go
 *   to lock the orphan dir and the orphan recovery process attempts to
 *   iget() inside the orphan dir lock. This can be avoided by
 *   advertising our state to ocfs2_delete_inode().
 */
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
                                 int slot)
{
        int ret = 0;
        struct inode *inode = NULL;
        struct inode *iter;
        struct ocfs2_inode_info *oi;

        mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);

        ocfs2_mark_recovering_orphan_dir(osb, slot);
        ret = ocfs2_queue_orphans(osb, slot, &inode);
        ocfs2_clear_recovering_orphan_dir(osb, slot);

        /* Error here should be noted, but we want to continue with as
         * many queued inodes as we've got. */
        if (ret)
                mlog_errno(ret);

        while (inode) {
                oi = OCFS2_I(inode);
                mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);

                iter = oi->ip_next_orphan;

                spin_lock(&oi->ip_lock);
                /* The remote delete code may have set these on the
                 * assumption that the other node would wipe them
                 * successfully. If they are still in the node's
                 * orphan dir, we need to reset that state. */
                oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE);

                /* Set the proper information to get us going into
                 * ocfs2_delete_inode. */
                oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
                spin_unlock(&oi->ip_lock);

                iput(inode);

                inode = iter;
        }

        return ret;
}
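/*
 * Note on the pattern above: ocfs2_queue_orphans() links the inodes
 * together through ip_next_orphan, and the loop walks that list,
 * marking each inode OCFS2_INODE_MAYBE_ORPHANED before the final
 * iput() so that ocfs2_delete_inode() will consider wiping it.
 */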
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
{
        /* This check is good because ocfs2 will wait on our recovery
         * thread before changing it to something other than MOUNTED
         * or DISABLED. */
        wait_event(osb->osb_mount_event,
                   (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
                   atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
                   atomic_read(&osb->vol_state) == VOLUME_DISABLED);

        /* If there's an error on mount, then we may never get to the
         * MOUNTED flag, but this is set right before
         * dismount_volume() so we can trust it. */
        if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
                mlog(0, "mount error, exiting!\n");
                return -EBUSY;
        }

        return 0;
}
static int ocfs2_commit_thread(void *arg)
{
        int status;
        struct ocfs2_super *osb = arg;
        struct ocfs2_journal *journal = osb->journal;

        /* we can trust j_num_trans here because _should_stop() is only set in
         * shutdown and nobody other than ourselves should be able to start
         * transactions. committing on shutdown might take a few iterations
         * as final transactions put deleted inodes on the list */
        while (!(kthread_should_stop() &&
                 atomic_read(&journal->j_num_trans) == 0)) {

                wait_event_interruptible(osb->checkpoint_event,
                                         atomic_read(&journal->j_num_trans)
                                         || kthread_should_stop());

                status = ocfs2_commit_cache(osb);
                if (status < 0)
                        mlog_errno(status);

                if (kthread_should_stop() &&
                    atomic_read(&journal->j_num_trans)) {
                        mlog(ML_KERNEL,
                             "commit_thread: %u transactions pending on "
                             "shutdown\n",
                             atomic_read(&journal->j_num_trans));
                }
        }

        return 0;
}
/* Reads all the journal inodes without taking any cluster locks. Used
 * for hard readonly access to determine whether any journal requires
 * recovery. Also used to refresh the recovery generation numbers after
 * a journal has been recovered by another node.
 */
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
        int ret = 0;
        unsigned int slot;
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;
        int journal_dirty = 0;

        for (slot = 0; slot < osb->max_slots; slot++) {
                ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                di = (struct ocfs2_dinode *) di_bh->b_data;

                osb->slot_recovery_generations[slot] =
                        ocfs2_get_recovery_generation(di);

                if (le32_to_cpu(di->id1.journal1.ij_flags) &
                    OCFS2_JOURNAL_DIRTY_FL)
                        journal_dirty = 1;

                brelse(di_bh);
                di_bh = NULL;
        }

out:
        if (journal_dirty)
                ret = -EROFS;
        return ret;
}