X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=fs%2Fxfs%2Fxfs_iget.c;h=03f32c68731f3abaa6bee1d58743dd1edf6e0321;hb=2aeaa258c0527026228c43148ec6dffdc56bea1c;hp=c1c89dac19cceceed3aff020651b9eac349cf98e;hpb=bd3c97a7c718bfb9f1e4f31c16c383a5c6f815eb;p=linux-2.6-omap-h63xx.git

diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index c1c89dac19c..03f32c68731 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -39,132 +39,14 @@
 #include "xfs_quota.h"
 #include "xfs_utils.h"
 
-/*
- * Initialize the inode hash table for the newly mounted file system.
- * Choose an initial table size based on user specified value, else
- * use a simple algorithm using the maximum number of inodes as an
- * indicator for table size, and clamp it between one and some large
- * number of pages.
- */
-void
-xfs_ihash_init(xfs_mount_t *mp)
-{
-        __uint64_t icount;
-        uint i;
-
-        if (!mp->m_ihsize) {
-                icount = mp->m_maxicount ? mp->m_maxicount :
-                         (mp->m_sb.sb_dblocks << mp->m_sb.sb_inopblog);
-                mp->m_ihsize = 1 << max_t(uint, 8,
-                                        (xfs_highbit64(icount) + 1) / 2);
-                mp->m_ihsize = min_t(uint, mp->m_ihsize,
-                                        (64 * NBPP) / sizeof(xfs_ihash_t));
-        }
-
-        mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
-                                         NBPC * sizeof(xfs_ihash_t),
-                                         mp->m_ihsize * sizeof(xfs_ihash_t),
-                                         KM_SLEEP | KM_MAYFAIL | KM_LARGE);
-        mp->m_ihsize /= sizeof(xfs_ihash_t);
-        for (i = 0; i < mp->m_ihsize; i++)
-                rwlock_init(&(mp->m_ihash[i].ih_lock));
-}
-
-/*
- * Free up structures allocated by xfs_ihash_init, at unmount time.
- */
-void
-xfs_ihash_free(xfs_mount_t *mp)
-{
-        kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
-        mp->m_ihash = NULL;
-}
-
-/*
- * Initialize the inode cluster hash table for the newly mounted file system.
- * Its size is derived from the ihash table size.
- */
-void
-xfs_chash_init(xfs_mount_t *mp)
-{
-        uint i;
-
-        mp->m_chsize = max_t(uint, 1, mp->m_ihsize /
-                         (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog));
-        mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
-        mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
-                                                 * sizeof(xfs_chash_t),
-                                                 KM_SLEEP | KM_LARGE);
-        for (i = 0; i < mp->m_chsize; i++) {
-                spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
-        }
-}
-
-/*
- * Free up structures allocated by xfs_chash_init, at unmount time.
- */
-void
-xfs_chash_free(xfs_mount_t *mp)
-{
-        int i;
-
-        for (i = 0; i < mp->m_chsize; i++) {
-                spinlock_destroy(&mp->m_chash[i].ch_lock);
-        }
-
-        kmem_free(mp->m_chash, mp->m_chsize*sizeof(xfs_chash_t));
-        mp->m_chash = NULL;
-}
-
-/*
- * Try to move an inode to the front of its hash list if possible
- * (and if its not there already). Called right after obtaining
- * the list version number and then dropping the read_lock on the
- * hash list in question (which is done right after looking up the
- * inode in question...).
- */
-STATIC void
-xfs_ihash_promote(
-        xfs_ihash_t *ih,
-        xfs_inode_t *ip,
-        ulong version)
-{
-        xfs_inode_t *iq;
-
-        if ((ip->i_prevp != &ih->ih_next) && write_trylock(&ih->ih_lock)) {
-                if (likely(version == ih->ih_version)) {
-                        /* remove from list */
-                        if ((iq = ip->i_next)) {
-                                iq->i_prevp = ip->i_prevp;
-                        }
-                        *ip->i_prevp = iq;
-
-                        /* insert at list head */
-                        iq = ih->ih_next;
-                        iq->i_prevp = &ip->i_next;
-                        ip->i_next = iq;
-                        ip->i_prevp = &ih->ih_next;
-                        ih->ih_next = ip;
-                }
-                write_unlock(&ih->ih_lock);
-        }
-}
-
 /*
  * Look up an inode by number in the given file system.
- * The inode is looked up in the hash table for the file system
- * represented by the mount point parameter mp.  Each bucket of
- * the hash table is guarded by an individual semaphore.
- *
- * If the inode is found in the hash table, its corresponding vnode
- * is obtained with a call to vn_get(). This call takes care of
- * coordination with the reclamation of the inode and vnode. Note
- * that the vmap structure is filled in while holding the hash lock.
- * This gives us the state of the inode/vnode when we found it and
- * is used for coordination in vn_get().
+ * The inode is looked up in the cache held in each AG.
+ * If the inode is found in the cache, attach it to the provided
+ * vnode.
  *
- * If it is not in core, read it in from the file system's device and
- * add the inode into the hash table.
+ * If it is not in core, read it in from the file system's device,
+ * add it to the cache and attach the provided vnode.
  *
  * The inode is locked according to the value of the lock_flags parameter.
  * This flag parameter indicates how and if the inode's IO lock and inode lock
@@ -192,274 +74,241 @@ xfs_iget_core(
         xfs_inode_t **ipp,
         xfs_daddr_t bno)
 {
-        xfs_ihash_t *ih;
         xfs_inode_t *ip;
         xfs_inode_t *iq;
         bhv_vnode_t *inode_vp;
-        ulong version;
         int error;
-        /* REFERENCED */
-        xfs_chash_t *ch;
-        xfs_chashlist_t *chl, *chlnew;
-        SPLDECL(s);
+        xfs_icluster_t *icl, *new_icl = NULL;
+        unsigned long first_index, mask;
+        xfs_perag_t *pag;
+        xfs_agino_t agino;
+
+        /* the radix tree exists only in inode capable AGs */
+        if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
+                return EINVAL;
+
+        /* get the perag structure and ensure that it's inode capable */
+        pag = xfs_get_perag(mp, ino);
+        if (!pag->pagi_inodeok)
+                return EINVAL;
+        ASSERT(pag->pag_ici_init);
+        agino = XFS_INO_TO_AGINO(mp, ino);
+again:
+        read_lock(&pag->pag_ici_lock);
+        ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 
-        ih = XFS_IHASH(mp, ino);
+        if (ip != NULL) {
+                /*
+                 * If INEW is set this inode is being set up
+                 * we need to pause and try again.
+                 */
+                if (xfs_iflags_test(ip, XFS_INEW)) {
+                        read_unlock(&pag->pag_ici_lock);
+                        delay(1);
+                        XFS_STATS_INC(xs_ig_frecycle);
 
-again:
-        read_lock(&ih->ih_lock);
+                        goto again;
+                }
 
-        for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
-                if (ip->i_ino == ino) {
+                inode_vp = XFS_ITOV_NULL(ip);
+                if (inode_vp == NULL) {
                         /*
-                         * If INEW is set this inode is being set up
+                         * If IRECLAIM is set this inode is
+                         * on its way out of the system,
                          * we need to pause and try again.
                          */
-                        if (xfs_iflags_test(ip, XFS_INEW)) {
-                                read_unlock(&ih->ih_lock);
+                        if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
+                                read_unlock(&pag->pag_ici_lock);
                                 delay(1);
                                 XFS_STATS_INC(xs_ig_frecycle);
 
                                 goto again;
                         }
+                        ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE));
 
-                        inode_vp = XFS_ITOV_NULL(ip);
-                        if (inode_vp == NULL) {
-                                /*
-                                 * If IRECLAIM is set this inode is
-                                 * on its way out of the system,
-                                 * we need to pause and try again.
-                                 */
-                                if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
-                                        read_unlock(&ih->ih_lock);
-                                        delay(1);
-                                        XFS_STATS_INC(xs_ig_frecycle);
-
-                                        goto again;
-                                }
-                                ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-
-                                /*
-                                 * If lookup is racing with unlink, then we
-                                 * should return an error immediately so we
-                                 * don't remove it from the reclaim list and
-                                 * potentially leak the inode.
-                                 */
-                                if ((ip->i_d.di_mode == 0) &&
-                                    !(flags & XFS_IGET_CREATE)) {
-                                        read_unlock(&ih->ih_lock);
-                                        return ENOENT;
-                                }
-
-                                /*
-                                 * There may be transactions sitting in the
-                                 * incore log buffers or being flushed to disk
-                                 * at this time.  We can't clear the
-                                 * XFS_IRECLAIMABLE flag until these
-                                 * transactions have hit the disk, otherwise we
-                                 * will void the guarantee the flag provides
-                                 * xfs_iunpin()
-                                 */
-                                if (xfs_ipincount(ip)) {
-                                        read_unlock(&ih->ih_lock);
-                                        xfs_log_force(mp, 0,
-                                                XFS_LOG_FORCE|XFS_LOG_SYNC);
-                                        XFS_STATS_INC(xs_ig_frecycle);
-                                        goto again;
-                                }
-
-                                vn_trace_exit(vp, "xfs_iget.alloc",
-                                        (inst_t *)__return_address);
+                        /*
+                         * If lookup is racing with unlink, then we
+                         * should return an error immediately so we
+                         * don't remove it from the reclaim list and
+                         * potentially leak the inode.
+                         */
+                        if ((ip->i_d.di_mode == 0) &&
+                            !(flags & XFS_IGET_CREATE)) {
+                                read_unlock(&pag->pag_ici_lock);
+                                xfs_put_perag(mp, pag);
+                                return ENOENT;
+                        }
 
-                                XFS_STATS_INC(xs_ig_found);
+                        /*
+                         * There may be transactions sitting in the
+                         * incore log buffers or being flushed to disk
+                         * at this time.  We can't clear the
+                         * XFS_IRECLAIMABLE flag until these
+                         * transactions have hit the disk, otherwise we
+                         * will void the guarantee the flag provides
+                         * xfs_iunpin()
+                         */
+                        if (xfs_ipincount(ip)) {
+                                read_unlock(&pag->pag_ici_lock);
+                                xfs_log_force(mp, 0,
+                                        XFS_LOG_FORCE|XFS_LOG_SYNC);
+                                XFS_STATS_INC(xs_ig_frecycle);
+                                goto again;
+                        }
 
-                                xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
-                                version = ih->ih_version;
-                                read_unlock(&ih->ih_lock);
-                                xfs_ihash_promote(ih, ip, version);
+                        vn_trace_exit(ip, "xfs_iget.alloc",
+                                (inst_t *)__return_address);
 
-                                XFS_MOUNT_ILOCK(mp);
-                                list_del_init(&ip->i_reclaim);
-                                XFS_MOUNT_IUNLOCK(mp);
+                        XFS_STATS_INC(xs_ig_found);
 
-                                goto finish_inode;
+                        xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
+                        read_unlock(&pag->pag_ici_lock);
 
-                        } else if (vp != inode_vp) {
-                                struct inode *inode = vn_to_inode(inode_vp);
+                        XFS_MOUNT_ILOCK(mp);
+                        list_del_init(&ip->i_reclaim);
+                        XFS_MOUNT_IUNLOCK(mp);
 
-                                /* The inode is being torn down, pause and
-                                 * try again.
-                                 */
-                                if (inode->i_state & (I_FREEING | I_CLEAR)) {
-                                        read_unlock(&ih->ih_lock);
-                                        delay(1);
-                                        XFS_STATS_INC(xs_ig_frecycle);
+                        goto finish_inode;
 
-                                        goto again;
-                                }
-/* Chances are the other vnode (the one in the inode) is being torn
- * down right now, and we landed on top of it. Question is, what do
- * we do? Unhook the old inode and hook up the new one?
- */
-                                cmn_err(CE_PANIC,
-                        "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
-                                                inode_vp, vp);
-                        }
+                } else if (vp != inode_vp) {
+                        struct inode *inode = vn_to_inode(inode_vp);
 
-                        /*
-                         * Inode cache hit: if ip is not at the front of
-                         * its hash chain, move it there now.
-                         * Do this with the lock held for update, but
-                         * do statistics after releasing the lock.
+                        /* The inode is being torn down, pause and
+                         * try again.
                          */
-                        version = ih->ih_version;
-                        read_unlock(&ih->ih_lock);
-                        xfs_ihash_promote(ih, ip, version);
-                        XFS_STATS_INC(xs_ig_found);
+                        if (inode->i_state & (I_FREEING | I_CLEAR)) {
+                                read_unlock(&pag->pag_ici_lock);
+                                delay(1);
+                                XFS_STATS_INC(xs_ig_frecycle);
 
-finish_inode:
-                        if (ip->i_d.di_mode == 0) {
-                                if (!(flags & XFS_IGET_CREATE))
-                                        return ENOENT;
-                                xfs_iocore_inode_reinit(ip);
+                                goto again;
                         }
+/* Chances are the other vnode (the one in the inode) is being torn
+* down right now, and we landed on top of it. Question is, what do
+* we do? Unhook the old inode and hook up the new one?
+*/
+                        cmn_err(CE_PANIC,
+                "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
+                                inode_vp, vp);
+                }
 
-                        if (lock_flags != 0)
-                                xfs_ilock(ip, lock_flags);
+                /*
+                 * Inode cache hit
+                 */
+                read_unlock(&pag->pag_ici_lock);
+                XFS_STATS_INC(xs_ig_found);
 
-                        xfs_iflags_clear(ip, XFS_ISTALE);
-                        vn_trace_exit(vp, "xfs_iget.found",
-                                (inst_t *)__return_address);
-                        goto return_ip;
+finish_inode:
+                if (ip->i_d.di_mode == 0) {
+                        if (!(flags & XFS_IGET_CREATE)) {
+                                xfs_put_perag(mp, pag);
+                                return ENOENT;
+                        }
+                        xfs_iocore_inode_reinit(ip);
                 }
+
+                if (lock_flags != 0)
+                        xfs_ilock(ip, lock_flags);
+
+                xfs_iflags_clear(ip, XFS_ISTALE);
+                vn_trace_exit(ip, "xfs_iget.found",
+                        (inst_t *)__return_address);
+                goto return_ip;
         }
 
         /*
-         * Inode cache miss: save the hash chain version stamp and unlock
-         * the chain, so we don't deadlock in vn_alloc.
+         * Inode cache miss
         */
+        read_unlock(&pag->pag_ici_lock);
         XFS_STATS_INC(xs_ig_missed);
 
-        version = ih->ih_version;
-
-        read_unlock(&ih->ih_lock);
-
         /*
         * Read the disk inode attributes into a new inode structure and get
         * a new vnode for it. This should also initialize i_ino and i_mount.
         */
        error = xfs_iread(mp, tp, ino, &ip, bno,
                          (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
-        if (error)
+        if (error) {
+                xfs_put_perag(mp, pag);
                 return error;
+        }
 
-        vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);
+        vn_trace_exit(ip, "xfs_iget.alloc", (inst_t *)__return_address);
 
         xfs_inode_lock_init(ip, vp);
         xfs_iocore_inode_init(ip);
-
         if (lock_flags)
                 xfs_ilock(ip, lock_flags);
 
         if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
                 xfs_idestroy(ip);
+                xfs_put_perag(mp, pag);
                 return ENOENT;
         }
 
         /*
-         * Put ip on its hash chain, unless someone else hashed a duplicate
-         * after we released the hash lock.
+         * This is a bit messy - we preallocate everything we _might_
+         * need before we pick up the ici lock. That way we don't have to
+         * juggle locks and go all the way back to the start.
         */
-        write_lock(&ih->ih_lock);
+        new_icl = kmem_zone_alloc(xfs_icluster_zone, KM_SLEEP);
+        if (radix_tree_preload(GFP_KERNEL)) {
+                delay(1);
+                goto again;
+        }
+        mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
+        first_index = agino & mask;
+        write_lock(&pag->pag_ici_lock);
 
-        if (ih->ih_version != version) {
-                for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
-                        if (iq->i_ino == ino) {
-                                write_unlock(&ih->ih_lock);
-                                xfs_idestroy(ip);
+        /*
+         * Find the cluster if it exists
+         */
+        icl = NULL;
+        if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq,
+                                   first_index, 1)) {
+                if ((iq->i_ino & mask) == first_index)
+                        icl = iq->i_cluster;
+        }
 
-                                XFS_STATS_INC(xs_ig_dup);
-                                goto again;
-                        }
-                }
+        /*
+         * insert the new inode
+         */
+        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
+        if (unlikely(error)) {
+                BUG_ON(error != -EEXIST);
+                write_unlock(&pag->pag_ici_lock);
+                radix_tree_preload_end();
+                xfs_idestroy(ip);
+                XFS_STATS_INC(xs_ig_dup);
+                goto again;
         }
 
         /*
         * These values _must_ be set before releasing ihlock!
         */
-        ip->i_hash = ih;
-        if ((iq = ih->ih_next)) {
-                iq->i_prevp = &ip->i_next;
-        }
-        ip->i_next = iq;
-        ip->i_prevp = &ih->ih_next;
-        ih->ih_next = ip;
         ip->i_udquot = ip->i_gdquot = NULL;
-        ih->ih_version++;
         xfs_iflags_set(ip, XFS_INEW);
-        write_unlock(&ih->ih_lock);
 
-        /*
-         * put ip on its cluster's hash chain
-         */
-        ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
-               ip->i_cnext == NULL);
-
-        chlnew = NULL;
-        ch = XFS_CHASH(mp, ip->i_blkno);
- chlredo:
-        s = mutex_spinlock(&ch->ch_lock);
-        for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
-                if (chl->chl_blkno == ip->i_blkno) {
-
-                        /* insert this inode into the doubly-linked list
-                         * where chl points */
-                        if ((iq = chl->chl_ip)) {
-                                ip->i_cprev = iq->i_cprev;
-                                iq->i_cprev->i_cnext = ip;
-                                iq->i_cprev = ip;
-                                ip->i_cnext = iq;
-                        } else {
-                                ip->i_cnext = ip;
-                                ip->i_cprev = ip;
-                        }
-                        chl->chl_ip = ip;
-                        ip->i_chash = chl;
-                        break;
-                }
-        }
+        ASSERT(ip->i_cluster == NULL);
 
-        /* no hash list found for this block; add a new hash list */
-        if (chl == NULL) {
-                if (chlnew == NULL) {
-                        mutex_spinunlock(&ch->ch_lock, s);
-                        ASSERT(xfs_chashlist_zone != NULL);
-                        chlnew = (xfs_chashlist_t *)
-                                        kmem_zone_alloc(xfs_chashlist_zone,
-                                                KM_SLEEP);
-                        ASSERT(chlnew != NULL);
-                        goto chlredo;
-                } else {
-                        ip->i_cnext = ip;
-                        ip->i_cprev = ip;
-                        ip->i_chash = chlnew;
-                        chlnew->chl_ip = ip;
-                        chlnew->chl_blkno = ip->i_blkno;
-                        if (ch->ch_list)
-                                ch->ch_list->chl_prev = chlnew;
-                        chlnew->chl_next = ch->ch_list;
-                        chlnew->chl_prev = NULL;
-                        ch->ch_list = chlnew;
-                        chlnew = NULL;
-                }
+        if (!icl) {
+                spin_lock_init(&new_icl->icl_lock);
+                INIT_HLIST_HEAD(&new_icl->icl_inodes);
+                icl = new_icl;
+                new_icl = NULL;
         } else {
-                if (chlnew != NULL) {
-                        kmem_zone_free(xfs_chashlist_zone, chlnew);
-                }
+                ASSERT(!hlist_empty(&icl->icl_inodes));
         }
+        spin_lock(&icl->icl_lock);
+        hlist_add_head(&ip->i_cnode, &icl->icl_inodes);
+        ip->i_cluster = icl;
+        spin_unlock(&icl->icl_lock);
 
-        mutex_spinunlock(&ch->ch_lock, s);
-
+        write_unlock(&pag->pag_ici_lock);
+        radix_tree_preload_end();
+        if (new_icl)
+                kmem_zone_free(xfs_icluster_zone, new_icl);
 
         /*
         * Link ip to its mount and thread it on the mount's inode list.
@@ -478,6 +327,7 @@ finish_inode:
         mp->m_inodes = ip;
 
         XFS_MOUNT_IUNLOCK(mp);
+        xfs_put_perag(mp, pag);
 
 return_ip:
         ASSERT(ip->i_df.if_ext_max ==
@@ -486,13 +336,14 @@ finish_inode:
         ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
                ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
 
+        xfs_iflags_set(ip, XFS_IMODIFIED);
         *ipp = ip;
 
         /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now. If it's a new inode being created, xfs_ialloc will handle it.
         */
-        bhv_vfs_init_vnode(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
+        bhv_vfs_init_vnode(XFS_MTOVFS(mp), vp, ip, 1);
         return 0;
 }
 
@@ -570,8 +421,8 @@ xfs_inode_lock_init(
         bhv_vnode_t *vp)
 {
         mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
-                     "xfsino", (long)vp->v_number);
-        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
+                     "xfsino", ip->i_ino);
+        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
         init_waitqueue_head(&ip->i_ipin_wait);
         atomic_set(&ip->i_pincount, 0);
         initnsema(&ip->i_flock, 1, "xfsfino");
@@ -587,32 +438,19 @@ xfs_inode_incore(xfs_mount_t *mp,
                  xfs_ino_t ino,
                  xfs_trans_t *tp)
 {
-        xfs_ihash_t *ih;
         xfs_inode_t *ip;
-        ulong version;
-
-        ih = XFS_IHASH(mp, ino);
-        read_lock(&ih->ih_lock);
-        for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
-                if (ip->i_ino == ino) {
-                        /*
-                         * If we find it and tp matches, return it.
-                         * Also move it to the front of the hash list
-                         * if we find it and it is not already there.
-                         * Otherwise break from the loop and return
-                         * NULL.
-                         */
-                        if (ip->i_transp == tp) {
-                                version = ih->ih_version;
-                                read_unlock(&ih->ih_lock);
-                                xfs_ihash_promote(ih, ip, version);
-                                return (ip);
-                        }
-                        break;
-                }
-        }
-        read_unlock(&ih->ih_lock);
-        return (NULL);
+        xfs_perag_t *pag;
+
+        pag = xfs_get_perag(mp, ino);
+        read_lock(&pag->pag_ici_lock);
+        ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
+        read_unlock(&pag->pag_ici_lock);
+        xfs_put_perag(mp, pag);
+
+        /* the returned inode must match the transaction */
+        if (ip && (ip->i_transp != tp))
+                return NULL;
+        return ip;
 }
 
 /*
@@ -629,7 +467,7 @@ xfs_iput(xfs_inode_t *ip,
 {
         bhv_vnode_t *vp = XFS_ITOV(ip);
 
-        vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);
+        vn_trace_entry(ip, "xfs_iput", (inst_t *)__return_address);
         xfs_iunlock(ip, lock_flags);
         VN_RELE(vp);
 }
@@ -644,7 +482,7 @@ xfs_iput_new(xfs_inode_t *ip,
         bhv_vnode_t *vp = XFS_ITOV(ip);
         struct inode *inode = vn_to_inode(vp);
 
-        vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);
+        vn_trace_entry(ip, "xfs_iput_new", (inst_t *)__return_address);
 
         if ((ip->i_d.di_mode == 0)) {
                 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
@@ -699,7 +537,8 @@ xfs_ireclaim(xfs_inode_t *ip)
         */
         vp = XFS_ITOV_NULL(ip);
         if (vp) {
-                vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
+                vn_to_inode(vp)->i_private = NULL;
+                ip->i_vnode = NULL;
         }
 
         /*
@@ -718,58 +557,26 @@ void
 xfs_iextract(
         xfs_inode_t *ip)
 {
-        xfs_ihash_t *ih;
+        xfs_mount_t *mp = ip->i_mount;
+        xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
         xfs_inode_t *iq;
-        xfs_mount_t *mp;
-        xfs_chash_t *ch;
-        xfs_chashlist_t *chl, *chm;
-        SPLDECL(s);
-
-        ih = ip->i_hash;
-        write_lock(&ih->ih_lock);
-        if ((iq = ip->i_next)) {
-                iq->i_prevp = ip->i_prevp;
-        }
-        *ip->i_prevp = iq;
-        ih->ih_version++;
-        write_unlock(&ih->ih_lock);
+
+        write_lock(&pag->pag_ici_lock);
+        radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
+        write_unlock(&pag->pag_ici_lock);
+        xfs_put_perag(mp, pag);
 
         /*
-         * Remove from cluster hash list
-         * 1) delete the chashlist if this is the last inode on the chashlist
-         * 2) unchain from list of inodes
-         * 3) point chashlist->chl_ip to 'chl_next' if to this inode.
+         * Remove from cluster list
         */
         mp = ip->i_mount;
-        ch = XFS_CHASH(mp, ip->i_blkno);
-        s = mutex_spinlock(&ch->ch_lock);
-
-        if (ip->i_cnext == ip) {
-                /* Last inode on chashlist */
-                ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
-                ASSERT(ip->i_chash != NULL);
-                chm=NULL;
-                chl = ip->i_chash;
-                if (chl->chl_prev)
-                        chl->chl_prev->chl_next = chl->chl_next;
-                else
-                        ch->ch_list = chl->chl_next;
-                if (chl->chl_next)
-                        chl->chl_next->chl_prev = chl->chl_prev;
-                kmem_zone_free(xfs_chashlist_zone, chl);
-        } else {
-                /* delete one inode from a non-empty list */
-                iq = ip->i_cnext;
-                iq->i_cprev = ip->i_cprev;
-                ip->i_cprev->i_cnext = iq;
-                if (ip->i_chash->chl_ip == ip) {
-                        ip->i_chash->chl_ip = iq;
-                }
-                ip->i_chash = __return_address;
-                ip->i_cprev = __return_address;
-                ip->i_cnext = __return_address;
-        }
-        mutex_spinunlock(&ch->ch_lock, s);
+        spin_lock(&ip->i_cluster->icl_lock);
+        hlist_del(&ip->i_cnode);
+        spin_unlock(&ip->i_cluster->icl_lock);
+
+        /* was last inode in cluster? */
+        if (hlist_empty(&ip->i_cluster->icl_inodes))
+                kmem_zone_free(xfs_icluster_zone, ip->i_cluster);
 
         /*
         * Remove from mount's inode list.
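The recurring pattern in the xfs_iget_core() and xfs_iextract() hunks above - one radix tree per allocation group, indexed by the AG-relative inode number, with node memory preallocated before the tree lock is taken - is sketched below in isolation. This is a simplified illustration, not the patch's code: my_pag, my_inode, my_iget_lookup() and my_iget_insert() are hypothetical stand-ins for xfs_perag_t, xfs_inode_t and the xfs_iget_core() logic.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct my_inode;                                /* stand-in for xfs_inode_t */

struct my_pag {                                 /* stand-in for xfs_perag_t */
        rwlock_t                pag_lock;       /* protects pag_root */
        struct radix_tree_root  pag_root;       /* agino -> my_inode */
};

/* Cache lookup: NULL means a miss, and the caller reads the inode in. */
static struct my_inode *
my_iget_lookup(struct my_pag *pag, unsigned long agino)
{
        struct my_inode *ip;

        read_lock(&pag->pag_lock);
        ip = radix_tree_lookup(&pag->pag_root, agino);
        read_unlock(&pag->pag_lock);
        return ip;
}

/*
 * Cache insert.  radix_tree_preload() allocates whatever tree nodes
 * the insert might need while sleeping is still allowed, so the
 * insert under the write lock cannot fail on memory - only with
 * -EEXIST when another thread won the race, in which case the
 * caller goes back to the lookup (the "goto again" in xfs_iget_core()).
 */
static int
my_iget_insert(struct my_pag *pag, unsigned long agino,
               struct my_inode *ip)
{
        int error;

        if (radix_tree_preload(GFP_KERNEL))
                return -ENOMEM;
        write_lock(&pag->pag_lock);
        error = radix_tree_insert(&pag->pag_root, agino, ip);
        write_unlock(&pag->pag_lock);
        radix_tree_preload_end();
        return error;
}

The gain over the removed global hash is that both the index and its lock are per-AG, so lookups in different allocation groups never contend, and the radix tree's gang lookup lets the patch find an existing inode cluster by masking the low bits off the inode number instead of maintaining the old chashlist chains.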
@@ -879,17 +686,17 @@ xfs_ilock(xfs_inode_t *ip,
                (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
         ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
                (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-        ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);
+        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
 
         if (lock_flags & XFS_IOLOCK_EXCL) {
-                mrupdate(&ip->i_iolock);
+                mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
         } else if (lock_flags & XFS_IOLOCK_SHARED) {
-                mraccess(&ip->i_iolock);
+                mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
         }
         if (lock_flags & XFS_ILOCK_EXCL) {
-                mrupdate(&ip->i_lock);
+                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
         } else if (lock_flags & XFS_ILOCK_SHARED) {
-                mraccess(&ip->i_lock);
+                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
         }
         xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
 }
@@ -923,7 +730,7 @@ xfs_ilock_nowait(xfs_inode_t *ip,
                (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
         ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
                (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-        ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);
+        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
 
         iolocked = 0;
         if (lock_flags & XFS_IOLOCK_EXCL) {
@@ -983,7 +790,8 @@ xfs_iunlock(xfs_inode_t *ip,
                (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
         ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
                (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
+        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
+                        XFS_LOCK_DEP_MASK)) == 0);
         ASSERT(lock_flags != 0);
 
         if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
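The final three hunks are lockdep plumbing rather than cache structure: the asserts now tolerate XFS_LOCK_DEP_MASK bits in lock_flags, and the mrlock calls become mrupdate_nested()/mraccess_nested() so a lock-ordering subclass can be passed down. The kernel idiom behind this, sketched here on a plain rw_semaphore with made-up names (my_inode, MY_LOCK_PARENT, MY_LOCK_CHILD), is that when two locks of the same class are held at once, each acquisition carries a distinct subclass so lockdep validates the ordering instead of reporting a false self-deadlock:

#include <linux/rwsem.h>

/* Hypothetical subclasses: which of the two inodes is being locked. */
enum { MY_LOCK_PARENT, MY_LOCK_CHILD };

struct my_inode {
        unsigned long           i_number;
        struct rw_semaphore     i_rwlock;
};

/*
 * Lock two inodes of the same class: a stable order (lowest inode
 * number first) prevents real deadlock, and the distinct lockdep
 * subclasses keep the second acquisition from looking recursive.
 */
static void
my_lock_two_inodes(struct my_inode *a, struct my_inode *b)
{
        if (b->i_number < a->i_number) {
                struct my_inode *tmp = a;

                a = b;
                b = tmp;
        }
        down_write_nested(&a->i_rwlock, MY_LOCK_PARENT);
        down_write_nested(&b->i_rwlock, MY_LOCK_CHILD);
}

In the patch itself the subclass travels inside lock_flags (extracted with XFS_IOLOCK_DEP() and XFS_ILOCK_DEP()), which is why the unlock path only needs to mask the extra bits out of its assert rather than pass them anywhere.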