X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=fs%2Fxfs%2Fxfs_itable.c;h=45d8776408ef2a4f0829c25caf9130adf6e57169;hb=fc6149d8d9634814cdcd9283b8f2efd3359181df;hp=4c2454bcc714f6db40d0402b886dc4e44e69bf4e;hpb=492559af235eb56884d62553f191c0b5c4def990;p=linux-2.6-omap-h63xx.git

diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 4c2454bcc71..45d8776408e 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -45,7 +45,7 @@ xfs_internal_inum(
 	xfs_ino_t	ino)
 {
 	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
-		(XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
+		(xfs_sb_version_hasquota(&mp->m_sb) &&
 		 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
 }
 
@@ -57,7 +57,7 @@ xfs_bulkstat_one_iget(
 	xfs_bstat_t	*buf,		/* return buffer */
 	int		*stat)		/* BULKSTAT_RV_... */
 {
-	xfs_dinode_core_t *dic;	/* dinode core info pointer */
+	xfs_icdinode_t	*dic;	/* dinode core info pointer */
 	xfs_inode_t	*ip;		/* incore inode pointer */
 	bhv_vnode_t	*vp;
 	int		error;
@@ -151,37 +151,37 @@ xfs_bulkstat_one_dinode(
 	 * the new format. We don't change the version number so that we
 	 * can distinguish this from a real new format inode.
 	 */
-	if (INT_GET(dic->di_version, ARCH_CONVERT) == XFS_DINODE_VERSION_1) {
-		buf->bs_nlink = INT_GET(dic->di_onlink, ARCH_CONVERT);
+	if (dic->di_version == XFS_DINODE_VERSION_1) {
+		buf->bs_nlink = be16_to_cpu(dic->di_onlink);
 		buf->bs_projid = 0;
 	} else {
-		buf->bs_nlink = INT_GET(dic->di_nlink, ARCH_CONVERT);
-		buf->bs_projid = INT_GET(dic->di_projid, ARCH_CONVERT);
+		buf->bs_nlink = be32_to_cpu(dic->di_nlink);
+		buf->bs_projid = be16_to_cpu(dic->di_projid);
 	}
 	buf->bs_ino = ino;
-	buf->bs_mode = INT_GET(dic->di_mode, ARCH_CONVERT);
-	buf->bs_uid = INT_GET(dic->di_uid, ARCH_CONVERT);
-	buf->bs_gid = INT_GET(dic->di_gid, ARCH_CONVERT);
-	buf->bs_size = INT_GET(dic->di_size, ARCH_CONVERT);
-	buf->bs_atime.tv_sec = INT_GET(dic->di_atime.t_sec, ARCH_CONVERT);
-	buf->bs_atime.tv_nsec = INT_GET(dic->di_atime.t_nsec, ARCH_CONVERT);
-	buf->bs_mtime.tv_sec = INT_GET(dic->di_mtime.t_sec, ARCH_CONVERT);
-	buf->bs_mtime.tv_nsec = INT_GET(dic->di_mtime.t_nsec, ARCH_CONVERT);
-	buf->bs_ctime.tv_sec = INT_GET(dic->di_ctime.t_sec, ARCH_CONVERT);
-	buf->bs_ctime.tv_nsec = INT_GET(dic->di_ctime.t_nsec, ARCH_CONVERT);
-	buf->bs_xflags = xfs_dic2xflags(dic);
-	buf->bs_extsize = INT_GET(dic->di_extsize, ARCH_CONVERT) << mp->m_sb.sb_blocklog;
-	buf->bs_extents = INT_GET(dic->di_nextents, ARCH_CONVERT);
-	buf->bs_gen = INT_GET(dic->di_gen, ARCH_CONVERT);
+	buf->bs_mode = be16_to_cpu(dic->di_mode);
+	buf->bs_uid = be32_to_cpu(dic->di_uid);
+	buf->bs_gid = be32_to_cpu(dic->di_gid);
+	buf->bs_size = be64_to_cpu(dic->di_size);
+	buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
+	buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
+	buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
+	buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
+	buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
+	buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
+	buf->bs_xflags = xfs_dic2xflags(dip);
+	buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
+	buf->bs_extents = be32_to_cpu(dic->di_nextents);
+	buf->bs_gen = be32_to_cpu(dic->di_gen);
 	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
-	buf->bs_dmevmask = INT_GET(dic->di_dmevmask, ARCH_CONVERT);
-	buf->bs_dmstate = INT_GET(dic->di_dmstate, ARCH_CONVERT);
-	buf->bs_aextents = INT_GET(dic->di_anextents, ARCH_CONVERT);
+	buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
+	buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
+	buf->bs_aextents = be16_to_cpu(dic->di_anextents);
 
-	switch (INT_GET(dic->di_format, ARCH_CONVERT)) {
+	switch (dic->di_format) {
 	case XFS_DINODE_FMT_DEV:
-		buf->bs_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
+		buf->bs_rdev = be32_to_cpu(dip->di_u.di_dev);
 		buf->bs_blksize = BLKDEV_IOSIZE;
 		buf->bs_blocks = 0;
 		break;
@@ -195,7 +195,7 @@ xfs_bulkstat_one_dinode(
 	case XFS_DINODE_FMT_BTREE:
 		buf->bs_rdev = 0;
 		buf->bs_blksize = mp->m_sb.sb_blocksize;
-		buf->bs_blocks = INT_GET(dic->di_nblocks, ARCH_CONVERT);
+		buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
 		break;
 	}
 
@@ -290,17 +290,24 @@ xfs_bulkstat_use_dinode(
 		return 1;
 	dip = (xfs_dinode_t *)
 			xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
-	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC ||
-	    !XFS_DINODE_GOOD_VERSION(
-			INT_GET(dip->di_core.di_version, ARCH_CONVERT)))
+	/*
+	 * Check the buffer containing the on-disk inode for di_mode == 0.
+	 * This is to prevent xfs_bulkstat from picking up just reclaimed
+	 * inodes that have their in-core state initialized but not flushed
+	 * to disk yet. This is a temporary hack that would require a proper
+	 * fix in the future.
+	 */
+	if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC ||
+	    !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) ||
+	    !dip->di_core.di_mode)
 		return 0;
 	if (flags & BULKSTAT_FG_QUICK) {
 		*dipp = dip;
 		return 1;
 	}
 	/* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
-	aformat = INT_GET(dip->di_core.di_aformat, ARCH_CONVERT);
-	if ((XFS_CFORK_Q(&dip->di_core) == 0) ||
+	aformat = dip->di_core.di_aformat;
+	if ((XFS_DFORK_Q(dip) == 0) ||
 	    (aformat == XFS_DINODE_FMT_LOCAL) ||
 	    (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) {
 		*dipp = dip;
@@ -309,6 +316,8 @@ xfs_bulkstat_use_dinode(
 	return 1;
 }
 
+#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
+
 /*
  * Return stat information in bulk (by-inode) for the filesystem.
  */
@@ -346,7 +355,7 @@ xfs_bulkstat(
 	xfs_inobt_rec_incore_t *irbp;	/* current irec buffer pointer */
 	xfs_inobt_rec_incore_t *irbuf;	/* start of irec buffer */
 	xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
-	xfs_ino_t		lastino=0; /* last inode number returned */
+	xfs_ino_t		lastino; /* last inode number returned */
 	int			nbcluster; /* # of blocks in a cluster */
 	int			nicluster; /* # of inodes in a cluster */
 	int			nimask;	/* mask for inode clusters */
@@ -366,6 +375,7 @@ xfs_bulkstat(
 	 * Get the last inode value, see if there's nothing to do.
 	 */
 	ino = (xfs_ino_t)*lastinop;
+	lastino = ino;
 	dip = NULL;
 	agno = XFS_INO_TO_AGNO(mp, ino);
 	agino = XFS_INO_TO_AGINO(mp, ino);
@@ -375,6 +385,9 @@ xfs_bulkstat(
 		*ubcountp = 0;
 		return 0;
 	}
+	if (!ubcountp || *ubcountp <= 0) {
+		return EINVAL;
+	}
 	ubcount = *ubcountp; /* statstruct's */
 	ubleft = ubcount * statstruct_size; /* bytes */
 	*ubcountp = ubelem = 0;
@@ -386,7 +399,7 @@ xfs_bulkstat(
 		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
 	nimask = ~(nicluster - 1);
 	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
-	irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
+	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4,
 				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
 	nirbuf = irbsize / sizeof(*irbuf);
 
@@ -395,7 +408,8 @@ xfs_bulkstat(
 	 * inode returned; 0 means start of the allocation group.
 	 */
 	rval = 0;
-	while (ubleft >= statstruct_size && agno < mp->m_sb.sb_agcount) {
+	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
+		cond_resched();
 		bp = NULL;
 		down_read(&mp->m_peraglock);
 		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
@@ -492,6 +506,7 @@ xfs_bulkstat(
 					break;
 				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
 							    &tmp);
+				cond_resched();
 			}
 			/*
 			 * If ran off the end of the ag either with an error,
@@ -535,6 +550,7 @@ xfs_bulkstat(
 			 */
 			agino = gino + XFS_INODES_PER_CHUNK;
 			error = xfs_inobt_increment(cur, 0, &tmp);
+			cond_resched();
 		}
 		/*
 		 * Drop the btree buffers and the agi buffer.
@@ -548,12 +564,12 @@ xfs_bulkstat(
 		 */
 		irbufend = irbp;
 		for (irbp = irbuf;
-		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
+		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
 			/*
 			 * Now process this chunk of inodes.
 			 */
 			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
-			     ubleft > 0 &&
+			     XFS_BULKSTAT_UBLEFT(ubleft) &&
 			     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
 			     chunkidx++, clustidx++, agino++) {
 				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
@@ -598,7 +614,8 @@ xfs_bulkstat(
 							xfs_buf_relse(bp);
 						error = xfs_itobp(mp, NULL, ip,
 								&dip, &bp, bno,
-								XFS_IMAP_BULKSTAT);
+								XFS_IMAP_BULKSTAT,
+								XFS_BUF_LOCK);
 						if (!error)
 							clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
 						kmem_zone_free(xfs_inode_zone, ip);
@@ -612,21 +629,25 @@ xfs_bulkstat(
 						}
 					}
 				}
+				ino = XFS_AGINO_TO_INO(mp, agno, agino);
+				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
 				/*
 				 * Skip if this inode is free.
 				 */
-				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
+				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
+					lastino = ino;
 					continue;
+				}
 				/*
 				 * Count used inodes as free so we can tell
 				 * when the chunk is used up.
 				 */
 				irbp->ir_freecount++;
-				ino = XFS_AGINO_TO_INO(mp, agno, agino);
-				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
 				if (!xfs_bulkstat_use_dinode(mp, flags, bp,
-							     clustidx, &dip))
+							     clustidx, &dip)) {
+					lastino = ino;
 					continue;
+				}
 				/*
 				 * If we need to do an iget, cannot hold bp.
 				 * Drop it, until starting the next cluster.
@@ -652,15 +673,13 @@ xfs_bulkstat(
 						ubleft, private_data,
 						bno, &ubused, dip, &fmterror);
 				if (fmterror == BULKSTAT_RV_NOTHING) {
-					if (error == EFAULT) {
-						ubleft = 0;
-						rval = error;
-						break;
-					}
-					else if (error == ENOMEM)
+					if (error && error != ENOENT &&
+					    error != EINVAL) {
 						ubleft = 0;
-					else
-						lastino = ino;
+						rval = error;
+						break;
+					}
+					lastino = ino;
 					continue;
 				}
 				if (fmterror == BULKSTAT_RV_GIVEUP) {
@@ -675,6 +694,8 @@ xfs_bulkstat(
 				ubelem++;
 				lastino = ino;
 			}
+
+			cond_resched();
 		}
 
 		if (bp)
@@ -683,7 +704,7 @@ xfs_bulkstat(
 		/*
 		 * Set up for the next loop iteration.
 		 */
-		if (ubleft > 0) {
+		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
 			if (end_of_ag) {
 				agno++;
 				agino = 0;
@@ -697,6 +718,11 @@ xfs_bulkstat(
 	 */
 	kmem_free(irbuf, irbsize);
 	*ubcountp = ubelem;
+	/*
+	 * Found some inodes, return them now and return the error next time.
+	 */
+	if (ubelem)
+		rval = 0;
 	if (agno >= mp->m_sb.sb_agcount) {
 		/*
 		 * If we ran out of filesystem, mark lastino as off
@@ -805,7 +831,7 @@ xfs_inumbers(
 	agino = XFS_INO_TO_AGINO(mp, ino);
 	left = *count;
 	*count = 0;
-	bcount = MIN(left, (int)(NBPP / sizeof(*buffer)));
+	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
 	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
 	error = bufidx = 0;
 	cur = NULL;
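
Note on the be*_to_cpu() conversions in this patch: XFS keeps the inode core big-endian on disk, and the bulkstat paths now read it with the kernel's be16/32/64_to_cpu() helpers instead of INT_GET(..., ARCH_CONVERT). The following is only a minimal userspace sketch of that pattern; the struct layouts, field subset and the be*toh()/htobe*() stand-ins are invented for illustration and are not the kernel code.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the big-endian on-disk inode core fields. */
struct fake_dinode_core {
	uint16_t di_mode;
	uint32_t di_uid;
	uint64_t di_size;
};

/* Invented stand-in for the host-endian stat structure handed back. */
struct fake_bstat {
	unsigned int       bs_mode;
	unsigned int       bs_uid;
	unsigned long long bs_size;
};

static void fake_bulkstat_one(const struct fake_dinode_core *dic,
			      struct fake_bstat *buf)
{
	/* cf. buf->bs_mode = be16_to_cpu(dic->di_mode); etc. in the patch */
	buf->bs_mode = be16toh(dic->di_mode);
	buf->bs_uid  = be32toh(dic->di_uid);
	buf->bs_size = be64toh(dic->di_size);
}

int main(void)
{
	struct fake_dinode_core dic = {
		.di_mode = htobe16(0100644),	/* stored big-endian, as on disk */
		.di_uid  = htobe32(1000),
		.di_size = htobe64(4096),
	};
	struct fake_bstat bs;

	fake_bulkstat_one(&dic, &bs);
	printf("mode %o uid %u size %llu\n", bs.bs_mode, bs.bs_uid, bs.bs_size);
	return 0;
}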
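
Note on the new XFS_BULKSTAT_UBLEFT() macro: the loops above now keep going only while at least one full stat structure still fits in the caller's buffer, with ubleft/ubused/ubelem tracking the space consumed. Below is a rough standalone sketch of that bookkeeping only; fill_stats(), UBLEFT_OK() and fake_bstat are invented names for the example, not part of xfs_bulkstat().

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-in for xfs_bstat_t; only the accounting mirrors the patch. */
struct fake_bstat {
	unsigned long long bs_ino;
};

/* cf. #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) */
#define UBLEFT_OK(ubleft, statstruct_size)	((ubleft) >= (statstruct_size))

/* Fill as many entries as still fit in "ubleft" bytes; return how many. */
static size_t fill_stats(struct fake_bstat *ubuf, size_t ubleft,
			 unsigned long long first_ino, unsigned long long last_ino)
{
	const size_t statstruct_size = sizeof(struct fake_bstat);
	size_t ubelem = 0;

	for (unsigned long long ino = first_ino;
	     ino <= last_ino && UBLEFT_OK(ubleft, statstruct_size); ino++) {
		size_t ubused = statstruct_size;	/* bytes this entry used */

		memset(&ubuf[ubelem], 0, statstruct_size);
		ubuf[ubelem].bs_ino = ino;		/* pretend formatter output */

		ubleft -= ubused;			/* shrink remaining space */
		ubelem++;				/* one more struct returned */
	}
	return ubelem;
}

int main(void)
{
	struct fake_bstat buf[4];

	/* Room for 4 entries but 10 inodes requested: the loop stops at 4. */
	size_t n = fill_stats(buf, sizeof(buf), 128, 137);
	printf("returned %zu entries, last ino %llu\n", n, buf[n - 1].bs_ino);
	return 0;
}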