diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index a96bde6df96..6409b376299 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -136,15 +136,9 @@ xfs_mount_init(void)
 		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
 	}
 
-	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
-	spinlock_init(&mp->m_sb_lock, "xfs_sb");
+	spin_lock_init(&mp->m_sb_lock);
 	mutex_init(&mp->m_ilock);
-	initnsema(&mp->m_growlock, 1, "xfs_grow");
-	/*
-	 * Initialize the AIL.
-	 */
-	xfs_trans_ail_init(mp);
-
+	mutex_init(&mp->m_growlock);
 	atomic_set(&mp->m_active_trans, 0);
 
 	return mp;
@@ -157,14 +151,8 @@ xfs_mount_init(void)
  */
 void
 xfs_mount_free(
-	xfs_mount_t	*mp,
-	int		remove_bhv)
+	xfs_mount_t	*mp)
 {
-	if (mp->m_ihash)
-		xfs_ihash_free(mp);
-	if (mp->m_chash)
-		xfs_chash_free(mp);
-
 	if (mp->m_perag) {
 		int	agno;
 
@@ -177,10 +165,10 @@ xfs_mount_free(
 			sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
 	}
 
-	AIL_LOCK_DESTROY(&mp->m_ail_lock);
+	spinlock_destroy(&mp->m_ail_lock);
 	spinlock_destroy(&mp->m_sb_lock);
 	mutex_destroy(&mp->m_ilock);
-	freesema(&mp->m_growlock);
+	mutex_destroy(&mp->m_growlock);
 	if (mp->m_quotainfo)
 		XFS_QM_DONE(mp);
 
@@ -191,17 +179,30 @@ xfs_mount_free(
 	if (mp->m_logname != NULL)
 		kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);
 
-	if (remove_bhv) {
-		struct bhv_vfs	*vfsp = XFS_MTOVFS(mp);
-
-		bhv_remove_all_vfsops(vfsp, 0);
-		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
-	}
-
 	xfs_icsb_destroy_counters(mp);
-
 	kmem_free(mp, sizeof(xfs_mount_t));
 }
 
+/*
+ * Check size of device based on the (data/realtime) block count.
+ * Note: this check is used by the growfs code as well as mount.
+ */
+int
+xfs_sb_validate_fsb_count(
+	xfs_sb_t	*sbp,
+	__uint64_t	nblocks)
+{
+	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
+	ASSERT(sbp->sb_blocklog >= BBSHIFT);
+
+#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
+	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+		return E2BIG;
+#else			/* Limited by UINT_MAX of sectors */
+	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
+		return E2BIG;
+#endif
+	return 0;
+}
 
 /*
  * Check the validity of the SB found.
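The new xfs_sb_validate_fsb_count() helper above packs two unit conversions into one shift. A rough userspace sketch of the same bounds check, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12) and 512-byte basic blocks (BBSHIFT == 9); the function name is invented, and the page-cache limit only bites where unsigned long is 32 bits wide:

#include <limits.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT	12	/* assumed 4 KiB pages */
#define SKETCH_BBSHIFT		9	/* 512-byte basic blocks */

/*
 * The page cache indexes the block device with an unsigned long, so a
 * filesystem bigger than ULONG_MAX pages cannot be addressed. Returns
 * nonzero if nblocks blocks of 2^blocklog bytes each would overflow
 * that limit (always false on LP64 hosts; the kernel cares on 32-bit).
 */
static int fsb_count_too_big(uint64_t nblocks, int blocklog)
{
	return (nblocks >> (SKETCH_PAGE_SHIFT - blocklog)) > ULONG_MAX;
}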
@@ -284,18 +285,8 @@ xfs_mount_validate_sb(
 		return XFS_ERROR(EFSCORRUPTED);
 	}
 
-	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
-	ASSERT(sbp->sb_blocklog >= BBSHIFT);
-
-#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
-	if (unlikely(
-	    (sbp->sb_dblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX ||
-	    (sbp->sb_rblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX)) {
-#else			/* Limited by UINT_MAX of sectors */
-	if (unlikely(
-	    (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX ||
-	    (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) {
-#endif
+	if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
+	    xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
 		xfs_fs_mount_cmn_err(flags,
 			"file system too large to be mounted on this system.");
 		return XFS_ERROR(E2BIG);
@@ -331,9 +322,19 @@ xfs_mount_validate_sb(
 	return 0;
 }
 
+STATIC void
+xfs_initialize_perag_icache(
+	xfs_perag_t	*pag)
+{
+	if (!pag->pag_ici_init) {
+		rwlock_init(&pag->pag_ici_lock);
+		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+		pag->pag_ici_init = 1;
+	}
+}
+
 xfs_agnumber_t
 xfs_initialize_perag(
-	bhv_vfs_t	*vfs,
 	xfs_mount_t	*mp,
 	xfs_agnumber_t	agcount)
 {
@@ -351,7 +352,7 @@ xfs_initialize_perag(
 	/* Clear the mount flag if no inode can overflow 32 bits
 	 * on this filesystem, or if specifically requested.
 	 */
-	if ((vfs->vfs_flag & VFS_32BITINODES) && ino > max_inum) {
+	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > max_inum) {
 		mp->m_flags |= XFS_MOUNT_32BITINODES;
 	} else {
 		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
 	}
@@ -385,48 +386,92 @@ xfs_initialize_perag(
 				pag->pagi_inodeok = 1;
 				if (index < max_metadata)
 					pag->pagf_metadata = 1;
+				xfs_initialize_perag_icache(pag);
 			}
 		} else {
 			/* Setup default behavior for smaller filesystems */
 			for (index = 0; index < agcount; index++) {
 				pag = &mp->m_perag[index];
 				pag->pagi_inodeok = 1;
+				xfs_initialize_perag_icache(pag);
 			}
 		}
 	return index;
 }
 
+void
+xfs_sb_from_disk(
+	xfs_sb_t	*to,
+	xfs_dsb_t	*from)
+{
+	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
+	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
+	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
+	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
+	to->sb_rextents = be64_to_cpu(from->sb_rextents);
+	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
+	to->sb_logstart = be64_to_cpu(from->sb_logstart);
+	to->sb_rootino = be64_to_cpu(from->sb_rootino);
+	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
+	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
+	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
+	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
+	to->sb_agcount = be32_to_cpu(from->sb_agcount);
+	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
+	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
+	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
+	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
+	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
+	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
+	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
+	to->sb_blocklog = from->sb_blocklog;
+	to->sb_sectlog = from->sb_sectlog;
+	to->sb_inodelog = from->sb_inodelog;
+	to->sb_inopblog = from->sb_inopblog;
+	to->sb_agblklog = from->sb_agblklog;
+	to->sb_rextslog = from->sb_rextslog;
+	to->sb_inprogress = from->sb_inprogress;
+	to->sb_imax_pct = from->sb_imax_pct;
+	to->sb_icount = be64_to_cpu(from->sb_icount);
+	to->sb_ifree = be64_to_cpu(from->sb_ifree);
+	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
+	to->sb_frextents = be64_to_cpu(from->sb_frextents);
+	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
+	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
+	to->sb_qflags = be16_to_cpu(from->sb_qflags);
+	to->sb_flags = from->sb_flags;
+	to->sb_shared_vn = from->sb_shared_vn;
+	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
+	to->sb_unit = be32_to_cpu(from->sb_unit);
+	to->sb_width = be32_to_cpu(from->sb_width);
+	to->sb_dirblklog = from->sb_dirblklog;
+	to->sb_logsectlog = from->sb_logsectlog;
+	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
+	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
+	to->sb_features2 = be32_to_cpu(from->sb_features2);
+}
+
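xfs_sb_from_disk() above replaces the old direction-flag translation with an explicit be*_to_cpu() call per field. The same read-side pattern in plain C, using the glibc <endian.h> helpers as stand-ins for the kernel's byte-order macros; the struct and field names are invented for the example:

#include <endian.h>
#include <stdint.h>

struct disk_sb {		/* on-disk layout: always big-endian */
	uint32_t	magic;
	uint64_t	dblocks;
	uint16_t	sectsize;
	uint8_t		blocklog;
};

struct incore_sb {		/* in-memory copy: host byte order */
	uint32_t	magic;
	uint64_t	dblocks;
	uint16_t	sectsize;
	uint8_t		blocklog;
};

static void sb_from_disk(struct incore_sb *to, const struct disk_sb *from)
{
	to->magic    = be32toh(from->magic);
	to->dblocks  = be64toh(from->dblocks);
	to->sectsize = be16toh(from->sectsize);
	to->blocklog = from->blocklog;	/* single bytes copy straight over */
}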
 /*
- * xfs_xlatesb
+ * Copy in core superblock to ondisk one.
  *
- *     data       - on disk version of sb
- *     sb         - a superblock
- *     dir        - conversion direction: <0 - convert sb to buf
- *                                        >0 - convert buf to sb
- *     fields     - which fields to copy (bitmask)
+ * The fields argument is a mask of superblock fields to copy.
  */
 void
-xfs_xlatesb(
-	void		*data,
-	xfs_sb_t	*sb,
-	int		dir,
+xfs_sb_to_disk(
+	xfs_dsb_t	*to,
+	xfs_sb_t	*from,
 	__int64_t	fields)
 {
-	xfs_caddr_t	buf_ptr;
-	xfs_caddr_t	mem_ptr;
+	xfs_caddr_t	to_ptr = (xfs_caddr_t)to;
+	xfs_caddr_t	from_ptr = (xfs_caddr_t)from;
 	xfs_sb_field_t	f;
 	int		first;
 	int		size;
 
-	ASSERT(dir);
 	ASSERT(fields);
-
 	if (!fields)
 		return;
 
-	buf_ptr = (xfs_caddr_t)data;
-	mem_ptr = (xfs_caddr_t)sb;
-
 	while (fields) {
 		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
 		first = xfs_sb_info[f].offset;
@@ -435,26 +480,20 @@ xfs_xlatesb(
 		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
 
 		if (size == 1 || xfs_sb_info[f].type == 1) {
-			if (dir > 0) {
-				memcpy(mem_ptr + first, buf_ptr + first, size);
-			} else {
-				memcpy(buf_ptr + first, mem_ptr + first, size);
-			}
+			memcpy(to_ptr + first, from_ptr + first, size);
 		} else {
 			switch (size) {
 			case 2:
-				INT_XLATE(*(__uint16_t*)(buf_ptr+first),
-					  *(__uint16_t*)(mem_ptr+first),
-					  dir, ARCH_CONVERT);
+				*(__be16 *)(to_ptr + first) =
					cpu_to_be16(*(__u16 *)(from_ptr + first));
 				break;
 			case 4:
-				INT_XLATE(*(__uint32_t*)(buf_ptr+first),
-					  *(__uint32_t*)(mem_ptr+first),
-					  dir, ARCH_CONVERT);
+				*(__be32 *)(to_ptr + first) =
					cpu_to_be32(*(__u32 *)(from_ptr + first));
 				break;
 			case 8:
-				INT_XLATE(*(__uint64_t*)(buf_ptr+first),
-					  *(__uint64_t*)(mem_ptr+first), dir, ARCH_CONVERT);
+				*(__be64 *)(to_ptr + first) =
					cpu_to_be64(*(__u64 *)(from_ptr + first));
 				break;
 			default:
 				ASSERT(0);
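xfs_sb_to_disk() keeps the xfs_sb_info offset/size table so that callers such as xfs_mod_sb() can convert only the fields named in a bitmask. A cut-down sketch of that table-driven loop; the descriptor table here is invented, and the kernel's table additionally carries a type so some entries are copied without swapping:

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct field_desc {
	uint16_t	offset;		/* byte offset in both layouts */
	uint8_t		size;		/* 1, 2, 4 or 8 bytes */
};

/* one entry per field, indexed by that field's bit in the mask */
static const struct field_desc sb_fields[] = {
	{ 0, 4 }, { 8, 8 }, { 16, 2 }, { 18, 1 },
};

static void sb_to_disk(void *to, const void *from, uint64_t fields)
{
	while (fields) {
		int f = __builtin_ctzll(fields);  /* like xfs_lowbit64() */
		size_t off = sb_fields[f].offset;
		char *dst = (char *)to + off;
		const char *src = (const char *)from + off;

		switch (sb_fields[f].size) {
		case 2:
			*(uint16_t *)dst = htobe16(*(const uint16_t *)src);
			break;
		case 4:
			*(uint32_t *)dst = htobe32(*(const uint32_t *)src);
			break;
		case 8:
			*(uint64_t *)dst = htobe64(*(const uint64_t *)src);
			break;
		default:	/* single byte: no byte order to fix */
			memcpy(dst, src, 1);
			break;
		}
		fields &= fields - 1;	/* clear the bit just handled */
	}
}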
@@ -476,7 +515,6 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 	unsigned int	sector_size;
 	unsigned int	extra_flags;
 	xfs_buf_t	*bp;
-	xfs_sb_t	*sbp;
 	int		error;
 
 	ASSERT(mp->m_sb_bp == NULL);
@@ -504,8 +542,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 	 * Initialize the mount structure from the superblock.
 	 * But first do some basic consistency checking.
 	 */
-	sbp = XFS_BUF_TO_SBP(bp);
-	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, XFS_SB_ALL_BITS);
+	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
 
 	error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
 	if (error) {
@@ -573,7 +610,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
 	int	i;
 
 	mp->m_agfrotor = mp->m_agirotor = 0;
-	spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock");
+	spin_lock_init(&mp->m_agirotor_lock);
 	mp->m_maxagi = mp->m_sb.sb_agcount;
 	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
 	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
@@ -632,51 +669,71 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
 					sbp->sb_inopblock);
 	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
 }
+
 /*
- * xfs_mountfs
+ * xfs_initialize_perag_data
  *
- * This function does the following on an initial mount of a file system:
- *	- reads the superblock from disk and init the mount struct
- *	- if we're a 32-bit kernel, do a size check on the superblock
- *		so we don't mount terabyte filesystems
- *	- init mount struct realtime fields
- *	- allocate inode hash table for fs
- *	- init directory manager
- *	- perform recovery and init the log manager
+ * Read in each per-ag structure so we can count up the number of
+ * allocated inodes, free inodes and used filesystem blocks as this
+ * information is no longer persistent in the superblock. Once we have
+ * this information, write it into the in-core superblock structure.
  */
-int
-xfs_mountfs(
-	bhv_vfs_t	*vfsp,
-	xfs_mount_t	*mp,
-	int		mfsi_flags)
+STATIC int
+xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
 {
-	xfs_buf_t	*bp;
-	xfs_sb_t	*sbp = &(mp->m_sb);
-	xfs_inode_t	*rip;
-	bhv_vnode_t	*rvp = NULL;
-	int		readio_log, writeio_log;
-	xfs_daddr_t	d;
-	__uint64_t	ret64;
-	__int64_t	update_flags;
-	uint		quotamount, quotaflags;
-	int		agno;
-	int		uuid_mounted = 0;
-	int		error = 0;
+	xfs_agnumber_t	index;
+	xfs_perag_t	*pag;
+	xfs_sb_t	*sbp = &mp->m_sb;
+	uint64_t	ifree = 0;
+	uint64_t	ialloc = 0;
+	uint64_t	bfree = 0;
+	uint64_t	bfreelst = 0;
+	uint64_t	btree = 0;
+	int		error;
 
-	if (mp->m_sb_bp == NULL) {
-		if ((error = xfs_readsb(mp, mfsi_flags))) {
+	for (index = 0; index < agcount; index++) {
+		/*
+		 * read the agf, then the agi. This gets us
+		 * all the information we need and populates the
+		 * per-ag structures for us.
+		 */
+		error = xfs_alloc_pagf_init(mp, NULL, index, 0);
+		if (error)
 			return error;
-		}
-	}
-	xfs_mount_common(mp, sbp);
 
+		error = xfs_ialloc_pagi_init(mp, NULL, index);
+		if (error)
+			return error;
+		pag = &mp->m_perag[index];
+		ifree += pag->pagi_freecount;
+		ialloc += pag->pagi_count;
+		bfree += pag->pagf_freeblks;
+		bfreelst += pag->pagf_flcount;
+		btree += pag->pagf_btreeblks;
+	}
 	/*
-	 * Check if sb_agblocks is aligned at stripe boundary
-	 * If sb_agblocks is NOT aligned turn off m_dalign since
-	 * allocator alignment is within an ag, therefore ag has
-	 * to be aligned at stripe boundary.
+	 * Overwrite incore superblock counters with just-read data
 	 */
-	update_flags = 0LL;
+	spin_lock(&mp->m_sb_lock);
+	sbp->sb_ifree = ifree;
+	sbp->sb_icount = ialloc;
+	sbp->sb_fdblocks = bfree + bfreelst + btree;
+	spin_unlock(&mp->m_sb_lock);
+
+	/* Fixup the per-cpu counters as well. */
+	xfs_icsb_reinit_counters(mp);
+
+	return 0;
+}
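Stepping back to xfs_readsb() above: it now reads the raw buffer, converts it with xfs_sb_from_disk(), and only then validates the result. A small userspace illustration of that read-convert-check ordering; the helper is invented, but 0x58465342 ("XFSB") is the real on-disk magic:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define XFS_SB_MAGIC	0x58465342	/* "XFSB" */

static int image_has_xfs_magic(const char *path)
{
	FILE *f = fopen(path, "rb");
	uint32_t disk_magic;

	if (!f)
		return 0;
	/* the primary superblock sits at offset 0 of the device */
	if (fread(&disk_magic, sizeof(disk_magic), 1, f) != 1) {
		fclose(f);
		return 0;
	}
	fclose(f);
	/* convert before comparing, as xfs_sb_from_disk() does */
	return be32toh(disk_magic) == XFS_SB_MAGIC;
}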
+
+/*
+ * Update alignment values based on mount options and sb values
+ */
+STATIC int
+xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags)
+{
+	xfs_sb_t	*sbp = &(mp->m_sb);
+
 	if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
 		/*
 		 * If stripe unit and stripe width are not multiples
@@ -687,8 +744,7 @@ xfs_mountfs(
 			if (mp->m_flags & XFS_MOUNT_RETERR) {
 				cmn_err(CE_WARN,
 					"XFS: alignment check 1 failed");
-				error = XFS_ERROR(EINVAL);
-				goto error1;
+				return XFS_ERROR(EINVAL);
 			}
 			mp->m_dalign = mp->m_swidth = 0;
 		} else {
@@ -698,8 +754,7 @@ xfs_mountfs(
 			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
 			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
 				if (mp->m_flags & XFS_MOUNT_RETERR) {
-					error = XFS_ERROR(EINVAL);
-					goto error1;
+					return XFS_ERROR(EINVAL);
 				}
 				xfs_fs_cmn_err(CE_WARN, mp,
 "stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
@@ -716,8 +771,7 @@ xfs_mountfs(
 "stripe alignment turned off: sunit(%d) less than bsize(%d)",
 						mp->m_dalign,
 						mp->m_blockmask +1);
-					error = XFS_ERROR(EINVAL);
-					goto error1;
+					return XFS_ERROR(EINVAL);
 				}
 				mp->m_swidth = 0;
 			}
@@ -730,11 +784,11 @@ xfs_mountfs(
 		if (XFS_SB_VERSION_HASDALIGN(sbp)) {
 			if (sbp->sb_unit != mp->m_dalign) {
 				sbp->sb_unit = mp->m_dalign;
-				update_flags |= XFS_SB_UNIT;
+				*update_flags |= XFS_SB_UNIT;
 			}
 			if (sbp->sb_width != mp->m_swidth) {
 				sbp->sb_width = mp->m_swidth;
-				update_flags |= XFS_SB_WIDTH;
+				*update_flags |= XFS_SB_WIDTH;
 			}
 		}
 	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
@@ -743,51 +797,45 @@ xfs_mountfs(
 		mp->m_swidth = sbp->sb_width;
 	}
 
-	xfs_alloc_compute_maxlevels(mp);
-	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
-	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
-	xfs_ialloc_compute_maxlevels(mp);
+	return 0;
+}
 
-	if (sbp->sb_imax_pct) {
-		__uint64_t	icount;
+/*
+ * Set the maximum inode count for this filesystem
+ */
+STATIC void
+xfs_set_maxicount(xfs_mount_t *mp)
+{
+	xfs_sb_t	*sbp = &(mp->m_sb);
+	__uint64_t	icount;
 
-		/* Make sure the maximum inode count is a multiple of the
-		 * units we allocate inodes in.
+	if (sbp->sb_imax_pct) {
+		/*
+		 * Make sure the maximum inode count is a multiple
+		 * of the units we allocate inodes in.
 		 */
-
 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 		do_div(icount, 100);
 		do_div(icount, mp->m_ialloc_blks);
 		mp->m_maxicount = (icount * mp->m_ialloc_blks)  <<
 				   sbp->sb_inopblog;
-	} else
+	} else {
 		mp->m_maxicount = 0;
-
-	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
-
-	/*
-	 * XFS uses the uuid from the superblock as the unique
-	 * identifier for fsid.  We can not use the uuid from the volume
-	 * since a single partition filesystem is identical to a single
-	 * partition volume/filesystem.
-	 */
-	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
-	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
-		if (xfs_uuid_mount(mp)) {
-			error = XFS_ERROR(EINVAL);
-			goto error1;
-		}
-		uuid_mounted=1;
-		ret64 = uuid_hash64(&sbp->sb_uuid);
-		memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64));
 	}
+}
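xfs_set_maxicount() above rounds the inode limit down to a whole inode-allocation chunk before converting blocks to inodes. The same arithmetic in plain C; do_div() is the kernel's 64-bit division helper, and ordinary division works in userspace (a result of 0 is treated by the callers as "no limit"):

#include <stdint.h>

static uint64_t max_inode_count(uint64_t dblocks, unsigned int imax_pct,
				uint64_t ialloc_blks, unsigned int inopblog)
{
	uint64_t icount;

	if (!imax_pct)
		return 0;

	icount = dblocks * imax_pct;
	icount /= 100;			/* imax_pct percent of the data blocks */
	icount /= ialloc_blks;		/* whole allocation chunks only */
	return (icount * ialloc_blks) << inopblog;	/* blocks -> inodes */
}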
+
+/*
+ * Set the default minimum read and write sizes unless
+ * already specified in a mount option.
+ * We use smaller I/O sizes when the file system
+ * is being used for NFS service (wsync mount option).
+ */
+STATIC void
+xfs_set_rw_sizes(xfs_mount_t *mp)
+{
+	xfs_sb_t	*sbp = &(mp->m_sb);
+	int		readio_log, writeio_log;
 
-	/*
-	 * Set the default minimum read and write sizes unless
-	 * already specified in a mount option.
-	 * We use smaller I/O sizes when the file system
-	 * is being used for NFS service (wsync mount option).
-	 */
 	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
 		if (mp->m_flags & XFS_MOUNT_WSYNC) {
 			readio_log = XFS_WSYNC_READIO_LOG;
@@ -801,16 +849,6 @@ xfs_mountfs(
 		writeio_log = mp->m_writeio_log;
 	}
 
-	/*
-	 * Set the number of readahead buffers to use based on
-	 * physical memory size.
-	 */
-	if (xfs_physmem <= 4096)		/* <= 16MB */
-		mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB;
-	else if (xfs_physmem <= 8192)	/* <= 32MB */
-		mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB;
-	else
-		mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32;
 	if (sbp->sb_blocklog > readio_log) {
 		mp->m_readio_log = sbp->sb_blocklog;
 	} else {
@@ -823,20 +861,14 @@ xfs_mountfs(
 		mp->m_writeio_log = writeio_log;
 	}
 	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
+}
 
-	/*
-	 * Set the inode cluster size based on the physical memory
-	 * size.  This may still be overridden by the file system
-	 * block size if it is larger than the chosen cluster size.
-	 */
-	if (xfs_physmem <= btoc(32 * 1024 * 1024)) { /* <= 32 MB */
-		mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE;
-	} else {
-		mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
-	}
-	/*
-	 * Set whether we're using inode alignment.
-	 */
+/*
+ * Set whether we're using inode alignment.
+ */
+STATIC void
+xfs_set_inoalignment(xfs_mount_t *mp)
+{
 	if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) &&
 	    mp->m_sb.sb_inoalignmt >=
 	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
@@ -852,14 +884,22 @@ xfs_mountfs(
 		mp->m_sinoalign = mp->m_dalign;
 	else
 		mp->m_sinoalign = 0;
-	/*
-	 * Check that the data (and log if separate) are an ok size.
-	 */
+}
+
+/*
+ * Check that the data (and log if separate) are an ok size.
+ */
+STATIC int
+xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
+{
+	xfs_buf_t	*bp;
+	xfs_daddr_t	d;
+	int		error;
+
 	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
 	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
 		cmn_err(CE_WARN, "XFS: size check 1 failed");
-		error = XFS_ERROR(E2BIG);
-		goto error1;
+		return XFS_ERROR(E2BIG);
 	}
 	error = xfs_read_buf(mp, mp->m_ddev_targp,
 			     d - XFS_FSS_TO_BB(mp, 1),
@@ -868,10 +908,9 @@ xfs_mountfs(
 		xfs_buf_relse(bp);
 	} else {
 		cmn_err(CE_WARN, "XFS: size check 2 failed");
-		if (error == ENOSPC) {
+		if (error == ENOSPC)
 			error = XFS_ERROR(E2BIG);
-		}
-		goto error1;
+		return error;
 	}
 
 	if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
@@ -879,8 +918,7 @@ xfs_mountfs(
 		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
 		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
 			cmn_err(CE_WARN, "XFS: size check 3 failed");
-			error = XFS_ERROR(E2BIG);
-			goto error1;
+			return XFS_ERROR(E2BIG);
 		}
 		error = xfs_read_buf(mp, mp->m_logdev_targp,
 				     d - XFS_FSB_TO_BB(mp, 1),
@@ -889,17 +927,111 @@ xfs_mountfs(
 			xfs_buf_relse(bp);
 		} else {
 			cmn_err(CE_WARN, "XFS: size check 3 failed");
-			if (error == ENOSPC) {
+			if (error == ENOSPC)
 				error = XFS_ERROR(E2BIG);
-			}
+			return error;
+		}
+	}
+	return 0;
+}
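xfs_check_sizes() above proves the device is big enough by actually reading the last block rather than trusting the stated geometry. The same trick in userspace, assuming 512-byte basic blocks; the names are illustrative:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

#define SKETCH_BBSHIFT	9	/* 512-byte basic blocks */

/*
 * Return 0 if the device (or image) behind fd really holds at least
 * nbblocks basic blocks. A short read or error at the last block
 * means the claimed size runs past the end of the device.
 */
static int check_device_size(int fd, uint64_t nbblocks)
{
	char block[1 << SKETCH_BBSHIFT];
	off_t last = (off_t)((nbblocks - 1) << SKETCH_BBSHIFT);

	if (pread(fd, block, sizeof(block), last) != (ssize_t)sizeof(block))
		return -1;
	return 0;
}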
+
+/*
+ * xfs_mountfs
+ *
+ * This function does the following on an initial mount of a file system:
+ *	- reads the superblock from disk and init the mount struct
+ *	- if we're a 32-bit kernel, do a size check on the superblock
+ *		so we don't mount terabyte filesystems
+ *	- init mount struct realtime fields
+ *	- allocate inode hash table for fs
+ *	- init directory manager
+ *	- perform recovery and init the log manager
+ */
+int
+xfs_mountfs(
+	xfs_mount_t	*mp,
+	int		mfsi_flags)
+{
+	xfs_sb_t	*sbp = &(mp->m_sb);
+	xfs_inode_t	*rip;
+	bhv_vnode_t	*rvp = NULL;
+	__uint64_t	resblks;
+	__int64_t	update_flags = 0LL;
+	uint		quotamount, quotaflags;
+	int		agno;
+	int		uuid_mounted = 0;
+	int		error = 0;
+
+	if (mp->m_sb_bp == NULL) {
+		error = xfs_readsb(mp, mfsi_flags);
+		if (error)
+			return error;
+	}
+	xfs_mount_common(mp, sbp);
+
+	/*
+	 * Check if sb_agblocks is aligned at stripe boundary
+	 * If sb_agblocks is NOT aligned turn off m_dalign since
+	 * allocator alignment is within an ag, therefore ag has
+	 * to be aligned at stripe boundary.
+	 */
+	error = xfs_update_alignment(mp, mfsi_flags, &update_flags);
+	if (error)
+		goto error1;
+
+	xfs_alloc_compute_maxlevels(mp);
+	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
+	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
+	xfs_ialloc_compute_maxlevels(mp);
+
+	xfs_set_maxicount(mp);
+
+	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
+
+	/*
+	 * XFS uses the uuid from the superblock as the unique
+	 * identifier for fsid. We can not use the uuid from the volume
+	 * since a single partition filesystem is identical to a single
+	 * partition volume/filesystem.
+	 */
+	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
+	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
+		if (xfs_uuid_mount(mp)) {
+			error = XFS_ERROR(EINVAL);
 			goto error1;
 		}
+		uuid_mounted=1;
 	}
 
+	/*
+	 * Set the minimum read and write sizes
+	 */
+	xfs_set_rw_sizes(mp);
+
+	/*
+	 * Set the inode cluster size.
+	 * This may still be overridden by the file system
+	 * block size if it is larger than the chosen cluster size.
+	 */
+	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
+
+	/*
+	 * Set inode alignment fields
+	 */
+	xfs_set_inoalignment(mp);
+
+	/*
+	 * Check that the data (and log if separate) are an ok size.
+	 */
+	error = xfs_check_sizes(mp, mfsi_flags);
+	if (error)
+		goto error1;
+
 	/*
 	 * Initialize realtime fields in the mount structure
 	 */
-	if ((error = xfs_rtmount_init(mp))) {
+	error = xfs_rtmount_init(mp);
+	if (error) {
 		cmn_err(CE_WARN, "XFS: RT mount failed");
 		goto error1;
 	}
@@ -917,16 +1049,6 @@ xfs_mountfs(
 	 */
 	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
 
-	/*
-	 * The vfs structure needs to have a file system independent
-	 * way of checking for the invariant file system ID.  Since it
-	 * can't look at mount structures it has a pointer to the data
-	 * in the mount structure.
-	 *
-	 * File systems that don't support user level file handles (i.e.
-	 * all of them except for XFS) will leave vfs_altfsid as NULL.
-	 */
-	vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid;
 	mp->m_dmevmask = 0;	/* not persistent; set after each mount */
 
 	xfs_dir_mount(mp);
@@ -941,13 +1063,6 @@ xfs_mountfs(
 	 */
 	xfs_trans_init(mp);
 
-	/*
-	 * Allocate and initialize the inode hash table for this
-	 * file system.
-	 */
-	xfs_ihash_init(mp);
-	xfs_chash_init(mp);
-
 	/*
 	 * Allocate and initialize the per-ag data.
 	 */
@@ -955,7 +1070,7 @@ xfs_mountfs(
 	mp->m_perag =
 		kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);
 
-	mp->m_maxagi = xfs_initialize_perag(vfsp, mp, sbp->sb_agcount);
+	mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
 
 	/*
 	 * log's mount-time initialization. Perform 1st part recovery if needed
@@ -975,6 +1090,34 @@ xfs_mountfs(
 		goto error2;
 	}
 
+	/*
+	 * Now the log is mounted, we know if it was an unclean shutdown or
+	 * not. If it was, then with the first phase of recovery complete, we
+	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
+	 * but they are recovered transactionally in the second recovery phase
+	 * later.
+	 *
+	 * Hence we can safely re-initialise incore superblock counters from
+	 * the per-ag data. These may not be correct if the filesystem was not
+	 * cleanly unmounted, so we need to wait for recovery to finish before
+	 * doing this.
+	 *
+	 * If the filesystem was cleanly unmounted, then we can trust the
+	 * values in the superblock to be correct and we don't need to do
+	 * anything here.
+	 *
+	 * If we are currently making the filesystem, the initialisation will
+	 * fail as the perag data is in an undefined state.
+	 */
+	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
+	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
+	    !mp->m_sb.sb_inprogress) {
+		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
+		if (error) {
+			goto error2;
+		}
+	}
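The guard on xfs_initialize_perag_data() above boils down to one trust decision about the on-disk counters. Restated as a standalone predicate, with invented names:

/*
 * With lazy superblock counters, icount/ifree/fdblocks on disk are only
 * guaranteed accurate after a clean unmount, so rebuild them from the
 * per-AG headers in every other case; but never while mkfs still has
 * sb_inprogress set, because the per-AG data is undefined then.
 */
static int need_counter_rebuild(int has_lazysbcount, int last_unmount_clean,
				int sb_inprogress)
{
	return has_lazysbcount && !last_unmount_clean && !sb_inprogress;
}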
+
 	/*
 	 * Get and sanity-check the root inode.
 	 * Save the pointer to it in the mount structure.
@@ -1006,7 +1149,8 @@ xfs_mountfs(
 	/*
 	 * Initialize realtime inode pointers in the mount structure
 	 */
-	if ((error = xfs_rtmount_inodes(mp))) {
+	error = xfs_rtmount_inodes(mp);
+	if (error) {
 		/*
 		 * Free up the root inode.
 		 */
@@ -1018,13 +1162,14 @@ xfs_mountfs(
 	 * If fs is not mounted readonly, then update the superblock
 	 * unit and width changes.
 	 */
-	if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
+	if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY))
 		xfs_mount_log_sbunit(mp, update_flags);
 
 	/*
 	 * Initialise the XFS quota management subsystem for this mount
 	 */
-	if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags)))
+	error = XFS_QM_INIT(mp, &quotamount, &quotaflags);
+	if (error)
 		goto error4;
 
 	/*
@@ -1041,9 +1186,27 @@ xfs_mountfs(
 	/*
 	 * Complete the quota initialisation, post-log-replay component.
 	 */
-	if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
+	error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags);
+	if (error)
 		goto error4;
 
+	/*
+	 * Now we are mounted, reserve a small amount of unused space for
+	 * privileged transactions. This is needed so that transaction
+	 * space required for critical operations can dip into this pool
+	 * when at ENOSPC. This is needed for operations like create with
+	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
+	 * are not allowed to use this reserved space.
+	 *
+	 * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
+	 * This may drive us straight to ENOSPC on mount, but that implies
+	 * we were already there on the last unmount.
+	 */
+	resblks = mp->m_sb.sb_dblocks;
+	do_div(resblks, 20);
+	resblks = min_t(__uint64_t, resblks, 1024);
+	xfs_reserve_blocks(mp, &resblks, NULL);
+
 	return 0;
 
  error4:
@@ -1054,8 +1217,6 @@ xfs_mountfs(
  error3:
 	xfs_log_unmount_dealloc(mp);
  error2:
-	xfs_ihash_free(mp);
-	xfs_chash_free(mp);
 	for (agno = 0; agno < sbp->sb_agcount; agno++)
 		if (mp->m_perag[agno].pagb_list)
 			kmem_free(mp->m_perag[agno].pagb_list,
@@ -1079,11 +1240,19 @@ xfs_mountfs(
 int
 xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
 {
-	struct bhv_vfs	*vfsp = XFS_MTOVFS(mp);
-#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
-	int64_t		fsid;
-#endif
+	__uint64_t	resblks;
 
+	/*
+	 * We can potentially deadlock here if we have an inode cluster
+	 * that has been freed but has its buffer still pinned in memory
+	 * because the transaction is still sitting in an iclog. The stale
+	 * inodes on that buffer will have their flush locks held until the
+	 * transaction hits the disk and the callbacks run. The inode
+	 * flush takes the flush lock unconditionally and with nothing to
+	 * push out the iclog we will never get that unlocked. Hence we
+	 * need to force the log first.
+	 */
+	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
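Back in xfs_mountfs() above, the reserve pool is sized at 5% of the data blocks, capped at 1024 filesystem blocks. As plain arithmetic (the kernel routes 64-bit division through do_div(); userspace can divide directly):

#include <stdint.h>

/* 5% of the data blocks, but never more than 1024 blocks */
static uint64_t default_resblks(uint64_t dblocks)
{
	uint64_t resblks = dblocks / 20;

	return resblks < 1024 ? resblks : 1024;
}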
+
 	xfs_iflush_all(mp);
 
 	XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
@@ -1100,10 +1269,26 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
 		xfs_binval(mp->m_rtdev_targp);
 	}
 
-	xfs_unmountfs_writesb(mp);
+	/*
+	 * Unreserve any blocks we have so that when we unmount we don't account
+	 * the reserved free space as used. This is really only necessary for
+	 * lazy superblock counting because it trusts the incore superblock
+	 * counters to be absolutely correct on clean unmount.
+	 *
+	 * We don't bother correcting this elsewhere for lazy superblock
+	 * counting because on mount of an unclean filesystem we reconstruct the
+	 * correct counter value and this is irrelevant.
+	 *
+	 * For non-lazy counter filesystems, this doesn't matter at all because
+	 * we only ever apply deltas to the superblock and hence the incore
+	 * value does not matter....
+	 */
+	resblks = 0;
+	xfs_reserve_blocks(mp, &resblks, NULL);
+
+	xfs_log_sbcount(mp, 1);
+	xfs_unmountfs_writesb(mp);
 	xfs_unmountfs_wait(mp); 		/* wait for async bufs */
-
 	xfs_log_unmount(mp);			/* Done! No more fs ops. */
 
 	xfs_freesb(mp);
@@ -1118,21 +1303,16 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
 	xfs_uuid_unmount(mp);
 
 #if defined(DEBUG) || defined(INDUCE_IO_ERROR)
-	/*
-	 * clear all error tags on this filesystem
-	 */
-	memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t));
-	xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0);
+	xfs_errortag_clearall(mp, 0);
#endif
-	XFS_IODONE(vfsp);
-	xfs_mount_free(mp, 1);
+	xfs_mount_free(mp);
 	return 0;
 }
 
 void
 xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
 {
-	if (mp->m_logdev_targp != mp->m_ddev_targp)
+	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
 		xfs_free_buftarg(mp->m_logdev_targp, 1);
 	if (mp->m_rtdev_targp)
 		xfs_free_buftarg(mp->m_rtdev_targp, 1);
@@ -1149,35 +1329,99 @@ xfs_unmountfs_wait(xfs_mount_t *mp)
 	xfs_wait_buftarg(mp->m_ddev_targp);
 }
 
+int
+xfs_fs_writable(xfs_mount_t *mp)
+{
+	return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) ||
+		(mp->m_flags & XFS_MOUNT_RDONLY));
+}
+
+/*
+ * xfs_log_sbcount
+ *
+ * Called either periodically to keep the on disk superblock values
+ * roughly up to date or from unmount to make sure the values are
+ * correct on a clean unmount.
+ *
+ * Note this code can be called during the process of freezing, so
+ * we may need to use the transaction allocator which does not
+ * block when the transaction subsystem is in its frozen state.
+ */
+int
+xfs_log_sbcount(
+	xfs_mount_t	*mp,
+	uint		sync)
+{
+	xfs_trans_t	*tp;
+	int		error;
+
+	if (!xfs_fs_writable(mp))
+		return 0;
+
+	xfs_icsb_sync_counters(mp);
+
+	/*
+	 * we don't need to do this if we are updating the superblock
+	 * counters on every modification.
+	 */
+	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
+		return 0;
+
+	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
+	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+					XFS_DEFAULT_LOG_COUNT);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+
+	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
+	if (sync)
+		xfs_trans_set_sync(tp);
+	xfs_trans_commit(tp, 0);
+
+	return 0;
+}
+
+STATIC void
+xfs_mark_shared_ro(
+	xfs_mount_t	*mp,
+	xfs_buf_t	*bp)
+{
+	xfs_dsb_t	*sb = XFS_BUF_TO_SBP(bp);
+	__uint16_t	version;
+
+	if (!(sb->sb_flags & XFS_SBF_READONLY))
+		sb->sb_flags |= XFS_SBF_READONLY;
+
+	version = be16_to_cpu(sb->sb_versionnum);
+	if ((version & XFS_SB_VERSION_NUMBITS) != XFS_SB_VERSION_4 ||
+	    !(version & XFS_SB_VERSION_SHAREDBIT))
+		version |= XFS_SB_VERSION_SHAREDBIT;
+	sb->sb_versionnum = cpu_to_be16(version);
+}
+
 int
 xfs_unmountfs_writesb(xfs_mount_t *mp)
 {
 	xfs_buf_t	*sbp;
-	xfs_sb_t	*sb;
 	int		error = 0;
 
 	/*
 	 * skip superblock write if fs is read-only, or
 	 * if we are doing a forced umount.
 	 */
-	sbp = xfs_getsb(mp, 0);
-	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
+	if (!((mp->m_flags & XFS_MOUNT_RDONLY) ||
 	      XFS_FORCED_SHUTDOWN(mp))) {
-		xfs_icsb_sync_counters(mp);
+		sbp = xfs_getsb(mp, 0);
 
 		/*
 		 * mark shared-readonly if desired
 		 */
-		sb = XFS_BUF_TO_SBP(sbp);
-		if (mp->m_mk_sharedro) {
-			if (!(sb->sb_flags & XFS_SBF_READONLY))
-				sb->sb_flags |= XFS_SBF_READONLY;
-			if (!XFS_SB_VERSION_HASSHARED(sb))
-				XFS_SB_VERSION_ADDSHARED(sb);
-			xfs_fs_cmn_err(CE_NOTE, mp,
-				"Unmounting, marking shared read-only");
-		}
+		if (mp->m_mk_sharedro)
+			xfs_mark_shared_ro(mp, sbp);
+
 		XFS_BUF_UNDONE(sbp);
 		XFS_BUF_UNREAD(sbp);
 		XFS_BUF_UNDELAYWRITE(sbp);
@@ -1192,8 +1436,8 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
 				mp, sbp, XFS_BUF_ADDR(sbp));
 		if (error && mp->m_mk_sharedro)
 			xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting.  Filesystem may not be marked shared readonly");
+		xfs_buf_relse(sbp);
 	}
-	xfs_buf_relse(sbp);
 	return error;
 }
 
@@ -1211,7 +1455,6 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
 	int		first;
 	int		last;
 	xfs_mount_t	*mp;
-	xfs_sb_t	*sbp;
 	xfs_sb_field_t	f;
 
 	ASSERT(fields);
@@ -1219,13 +1462,12 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
 		return;
 	mp = tp->t_mountp;
 	bp = xfs_trans_getsb(tp, mp, 0);
-	sbp = XFS_BUF_TO_SBP(bp);
 	first = sizeof(xfs_sb_t);
 	last = 0;
 
 	/* translate/copy */
 
-	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, fields);
+	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
 
 	/* find modified range */
 
@@ -1248,7 +1490,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
  * Fields are not allowed to dip below zero, so if the delta would
  * do this do not apply it and return EINVAL.
  *
- * The SB_LOCK must be held when this routine is called.
+ * The m_sb_lock must be held when this routine is called.
  */
 int
 xfs_mod_incore_sb_unlocked(
@@ -1413,7 +1655,7 @@ xfs_mod_incore_sb_unlocked(
 /*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
- * is protected by the SB_LOCK.  Just use the xfs_mod_incore_sb_unlocked()
+ * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
 int
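The XFS_SB_LOCK()-to-spin_lock() conversion running through the hunks below guards the global copy of counters whose hot paths live per-CPU. A toy version of that split using POSIX threads; unlike the real code, which disables the per-CPU counter and rebalances it across CPUs, this sketch simply falls back to the locked global value when one shard runs dry:

#include <pthread.h>
#include <stdint.h>

#define SKETCH_NCPUS	4	/* fixed CPU count, for the example only */

struct split_counter {
	pthread_mutex_t	lock;			/* stands in for m_sb_lock */
	int64_t		global;			/* authoritative remainder */
	int64_t		percpu[SKETCH_NCPUS];	/* unlocked fast-path shards */
};

/* returns 0 on success, -1 if the counter would dip below zero */
static int counter_mod(struct split_counter *c, int cpu, int64_t delta)
{
	int64_t v = c->percpu[cpu] + delta;

	if (v >= 0) {			/* fast path: this CPU's shard only */
		c->percpu[cpu] = v;
		return 0;
	}

	pthread_mutex_lock(&c->lock);	/* slow path: fold shard into global */
	v = c->global + c->percpu[cpu] + delta;
	if (v >= 0) {
		c->global = v;
		c->percpu[cpu] = 0;
		pthread_mutex_unlock(&c->lock);
		return 0;
	}
	pthread_mutex_unlock(&c->lock);
	return -1;
}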
@@ -1423,7 +1665,6 @@ xfs_mod_incore_sb(
 	int64_t		delta,
 	int		rsvd)
 {
-	unsigned long	s;
 	int	status;
 
 	/* check for per-cpu counters */
@@ -1440,9 +1681,9 @@ xfs_mod_incore_sb(
 		/* FALLTHROUGH */
 #endif
 	default:
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 		break;
 	}
 
@@ -1463,7 +1704,6 @@ xfs_mod_incore_sb(
 int
 xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 {
-	unsigned long	s;
 	int		status=0;
 	xfs_mod_sb_t	*msbp;
 
@@ -1471,10 +1711,10 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 	 * Loop through the array of mod structures and apply each
 	 * individually.  If any fail, then back out all those
 	 * which have already been applied.  Do all of this within
-	 * the scope of the SB_LOCK so that all of the changes will
+	 * the scope of the m_sb_lock so that all of the changes will
 	 * be atomic.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	msbp = &msb[0];
 	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
 		/*
@@ -1488,11 +1728,11 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				XFS_SB_UNLOCK(mp, s);
+				spin_unlock(&mp->m_sb_lock);
 				status = xfs_icsb_modify_counters(mp,
 							msbp->msb_field,
 							msbp->msb_delta, rsvd);
-				s = XFS_SB_LOCK(mp);
+				spin_lock(&mp->m_sb_lock);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1526,12 +1766,12 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				XFS_SB_UNLOCK(mp, s);
+				spin_unlock(&mp->m_sb_lock);
 				status = xfs_icsb_modify_counters(mp,
 							msbp->msb_field,
 							-(msbp->msb_delta),
 							rsvd);
-				s = XFS_SB_LOCK(mp);
+				spin_lock(&mp->m_sb_lock);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1547,7 +1787,7 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 			msbp--;
 		}
 	}
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 	return status;
 }
 
@@ -1695,12 +1935,12 @@ xfs_mount_log_sbunit(
 *
 * Locking rules:
 *
- *	1. XFS_SB_LOCK() before picking up per-cpu locks
+ *	1. m_sb_lock before picking up per-cpu locks
 *	2. per-cpu locks always picked up via for_each_online_cpu() order
- *	3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
+ *	3. accurate counter sync requires m_sb_lock + per cpu locks
 *	4. modifying per-cpu counters requires holding per-cpu lock
- *	5. modifying global counters requires holding XFS_SB_LOCK
- *	6. enabling or disabling a counter requires holding the XFS_SB_LOCK
+ *	5. modifying global counters requires holding m_sb_lock
+ *	6. enabling or disabling a counter requires holding the m_sb_lock
 *	and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
@@ -1727,7 +1967,6 @@ xfs_icsb_cpu_notify(
 {
 	xfs_icsb_cnts_t *cntp;
 	xfs_mount_t	*mp;
-	int		s;
 
 	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
 	cntp = (xfs_icsb_cnts_t *)
@@ -1753,7 +1992,7 @@ xfs_icsb_cpu_notify(
 		 * count into the total on the global superblock and
 		 * re-enable the counters. */
 		xfs_icsb_lock(mp);
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
 		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
 		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
@@ -1770,7 +2009,7 @@ xfs_icsb_cpu_notify(
 					XFS_ICSB_SB_LOCKED, 0);
 		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
 					XFS_ICSB_SB_LOCKED, 0);
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 		xfs_icsb_unlock(mp);
 		break;
 	}
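xfs_mod_incore_sb_batch() above applies an array of counter deltas as a unit and, if any single one fails, backs out the ones already applied before returning. The shape of that rollback loop, with hypothetical types:

#include <stdint.h>

struct sb_mod {
	int64_t	*field;		/* counter to modify */
	int64_t	delta;		/* signed change to apply */
};

/* apply every delta or none: on failure, undo in reverse order */
static int apply_batch(struct sb_mod *mods, unsigned int n)
{
	unsigned int i, j;

	for (i = 0; i < n; i++) {
		int64_t v = *mods[i].field + mods[i].delta;

		if (v < 0) {		/* counters may not go negative */
			for (j = i; j-- > 0; )
				*mods[j].field -= mods[j].delta;
			return -1;
		}
		*mods[i].field = v;
	}
	return 0;
}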
@@ -2001,11 +2240,10 @@ xfs_icsb_sync_counters_flags(
 	int		flags)
 {
 	xfs_icsb_cnts_t	cnt;
-	int		s;
 
 	/* Pass 1: lock all counters */
 	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 
 	xfs_icsb_count(mp, &cnt, flags);
 
@@ -2018,7 +2256,7 @@ xfs_icsb_sync_counters_flags(
 		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
 
 	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 }
 
 /*
@@ -2059,11 +2297,10 @@ xfs_icsb_balance_counter(
 {
 	uint64_t	count, resid;
 	int		weight = num_online_cpus();
-	int		s;
 	uint64_t	min = (uint64_t)min_per_cpu;
 
 	if (!(flags & XFS_ICSB_SB_LOCKED))
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 
 	/* disable counter and sync counter */
 	xfs_icsb_disable_counter(mp, field);
@@ -2097,10 +2334,10 @@ xfs_icsb_balance_counter(
 	xfs_icsb_enable_counter(mp, field, count, resid);
 out:
 	if (!(flags & XFS_ICSB_SB_LOCKED))
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 }
 
-int
+STATIC int
 xfs_icsb_modify_counters(
 	xfs_mount_t	*mp,
 	xfs_sb_field_t	field,
@@ -2109,7 +2346,7 @@ xfs_icsb_modify_counters(
 {
 	xfs_icsb_cnts_t	*icsbp;
 	long long	lcounter;	/* long counter for 64 bit fields */
-	int		cpu, ret = 0, s;
+	int		cpu, ret = 0;
 
 	might_sleep();
 again:
@@ -2187,15 +2424,15 @@ slow_path:
 	 * running atomically here, we know a rebalance cannot
 	 * be in progress. Hence we can go straight to operating
 	 * on the global superblock. We do not call xfs_mod_incore_sb()
-	 * here even though we need to get the SB_LOCK. Doing so
+	 * here even though we need to get the m_sb_lock. Doing so
 	 * will cause us to re-enter this function and deadlock.
-	 * Hence we get the SB_LOCK ourselves and then call
+	 * Hence we get the m_sb_lock ourselves and then call
 	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
 	 * directly on the global counters.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 
 	/*
 	 * Now that we've modified the global superblock, we