[linux-2.6-omap-h63xx.git] / fs / xfs / xfs_da_btree.c
index a68bc1f1a313c8862106adedf30ef84d65b92cd8..021a8f7e563f340df77bb75977ee397317d2d322 100644 (file)
@@ -511,12 +511,12 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
                 * Move the req'd B-tree elements from high in node1 to
                 * low in node2.
                 */
-               be16_add(&node2->hdr.count, count);
+               be16_add_cpu(&node2->hdr.count, count);
                tmp = count * (uint)sizeof(xfs_da_node_entry_t);
                btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
                btree_d = &node2->btree[0];
                memcpy(btree_d, btree_s, tmp);
-               be16_add(&node1->hdr.count, -count);
+               be16_add_cpu(&node1->hdr.count, -count);
        } else {
                /*
                 * Move the req'd B-tree elements from low in node2 to
@@ -527,7 +527,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
                btree_s = &node2->btree[0];
                btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
                memcpy(btree_d, btree_s, tmp);
-               be16_add(&node1->hdr.count, count);
+               be16_add_cpu(&node1->hdr.count, count);
                xfs_da_log_buf(tp, blk1->bp,
                        XFS_DA_LOGRANGE(node1, btree_d, tmp));
 
@@ -539,7 +539,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
                btree_s = &node2->btree[count];
                btree_d = &node2->btree[0];
                memmove(btree_d, btree_s, tmp);
-               be16_add(&node2->hdr.count, -count);
+               be16_add_cpu(&node2->hdr.count, -count);
        }
 
        /*
@@ -604,7 +604,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
        btree->before = cpu_to_be32(newblk->blkno);
        xfs_da_log_buf(state->args->trans, oldblk->bp,
                XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
-       be16_add(&node->hdr.count, 1);
+       be16_add_cpu(&node->hdr.count, 1);
        xfs_da_log_buf(state->args->trans, oldblk->bp,
                XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 
@@ -959,7 +959,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
        memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
        xfs_da_log_buf(state->args->trans, drop_blk->bp,
            XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
-       be16_add(&node->hdr.count, -1);
+       be16_add_cpu(&node->hdr.count, -1);
        xfs_da_log_buf(state->args->trans, drop_blk->bp,
            XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 
@@ -1018,7 +1018,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
         */
        tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
        memcpy(btree, &drop_node->btree[0], tmp);
-       be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
+       be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
 
        xfs_da_log_buf(tp, save_blk->bp,
                XFS_DA_LOGRANGE(save_node, &save_node->hdr,
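The hunks above switch XFS's private be16_add() over to the common be16_add_cpu() helper: a CPU-order delta is applied to a big-endian on-disk counter, with no change in behaviour. As a reminder of what the generic helper does (paraphrased from <linux/byteorder/generic.h>, not part of this diff):

    static inline void be16_add_cpu(__be16 *var, u16 val)
    {
            /* decode to CPU order, adjust, re-encode as big-endian */
            *var = cpu_to_be16(be16_to_cpu(*var) + val);
    }

The negative deltas above (-count, -1) keep working because the arithmetic wraps modulo 2^16 exactly as it did with the old helper.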
@@ -1090,8 +1090,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
                if (blk->magic == XFS_DA_NODE_MAGIC) {
                        node = blk->bp->data;
                        max = be16_to_cpu(node->hdr.count);
-                       btreehashval = node->btree[max-1].hashval;
-                       blk->hashval = be32_to_cpu(btreehashval);
+                       blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
 
                        /*
                         * Binary search.  (note: small blocks will skip loop)
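For context on this hunk: xfs_da_node_lookup_int() chooses the child block to descend into by binary-searching the node's btree[] entries, which are kept sorted by hashval; the temporary removed above merely held the big-endian value before conversion. Purely as an illustration of that kind of search over big-endian keys, and not the actual XFS loop (find_ge_hash is a made-up name):

    /*
     * Illustrative sketch only: return the index of the first entry whose
     * hashval is >= wanted, or count if no entry qualifies.  Assumes the
     * entries are sorted by hashval in ascending order.
     */
    static int find_ge_hash(xfs_da_node_entry_t *btree, int count, u32 wanted)
    {
            int lo = 0, hi = count;         /* search window is [lo, hi) */

            while (lo < hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (be32_to_cpu(btree[mid].hashval) < wanted)
                            lo = mid + 1;   /* answer lies to the right */
                    else
                            hi = mid;       /* mid may be the answer */
            }
            return lo;
    }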
@@ -1976,7 +1975,6 @@ xfs_da_do_buf(
                error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
                if (unlikely(error == EFSCORRUPTED)) {
                        if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
-                               int     i;
                                cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n",
                                        (long long)bno);
                                cmn_err(CE_ALERT, "dir: inode %lld\n",
@@ -2166,21 +2164,6 @@ xfs_da_reada_buf(
                return rval;
 }
 
-/*
- * Calculate the number of bits needed to hold i different values.
- */
-uint
-xfs_da_log2_roundup(uint i)
-{
-       uint rval;
-
-       for (rval = 0; rval < NBBY * sizeof(i); rval++) {
-               if ((1 << rval) >= i)
-                       break;
-       }
-       return(rval);
-}
-
 kmem_zone_t *xfs_da_state_zone;        /* anchor for state struct zone */
 kmem_zone_t *xfs_dabuf_zone;           /* dabuf zone */
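The helper deleted above returned the smallest shift for which (1 << shift) >= i, i.e. the number of bits needed to distinguish i different values (for example i = 5 gives 3, since 1 << 3 = 8 >= 5). Anything that still needs this can get the same answer from the generic bitops without the loop; a hedged sketch (bits_for_values is a made-up name, and the equivalence assumes i >= 1):

    #include <linux/bitops.h>

    /*
     * Matches the removed xfs_da_log2_roundup() for i >= 1: fls() returns
     * the 1-based position of the highest set bit (fls(0) == 0), so
     * fls(i - 1) is the smallest shift with (1 << shift) >= i.
     */
    static inline unsigned int bits_for_values(unsigned int i)
    {
            return fls(i - 1);
    }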
 
@@ -2235,7 +2218,7 @@ xfs_da_state_free(xfs_da_state_t *state)
 
 #ifdef XFS_DABUF_DEBUG
 xfs_dabuf_t    *xfs_dabuf_global_list;
-lock_t         xfs_dabuf_global_lock;
+spinlock_t     xfs_dabuf_global_lock;
 #endif
 
 /*
@@ -2281,10 +2264,9 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
        }
 #ifdef XFS_DABUF_DEBUG
        {
-               SPLDECL(s);
                xfs_dabuf_t     *p;
 
-               s = mutex_spinlock(&xfs_dabuf_global_lock);
+               spin_lock(&xfs_dabuf_global_lock);
                for (p = xfs_dabuf_global_list; p; p = p->next) {
                        ASSERT(p->blkno != dabuf->blkno ||
                               p->target != dabuf->target);
@@ -2294,7 +2276,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
                        xfs_dabuf_global_list->prev = dabuf;
                dabuf->next = xfs_dabuf_global_list;
                xfs_dabuf_global_list = dabuf;
-               mutex_spinunlock(&xfs_dabuf_global_lock, s);
+               spin_unlock(&xfs_dabuf_global_lock);
        }
 #endif
        return dabuf;
@@ -2336,16 +2318,14 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf)
                kmem_free(dabuf->data, BBTOB(dabuf->bbcount));
 #ifdef XFS_DABUF_DEBUG
        {
-               SPLDECL(s);
-
-               s = mutex_spinlock(&xfs_dabuf_global_lock);
+               spin_lock(&xfs_dabuf_global_lock);
                if (dabuf->prev)
                        dabuf->prev->next = dabuf->next;
                else
                        xfs_dabuf_global_list = dabuf->next;
                if (dabuf->next)
                        dabuf->next->prev = dabuf->prev;
-               mutex_spinunlock(&xfs_dabuf_global_lock, s);
+               spin_unlock(&xfs_dabuf_global_lock);
        }
        memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));
 #endif
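The last few hunks convert the XFS_DABUF_DEBUG bookkeeping from the old XFS lock wrappers (lock_t, SPLDECL(), mutex_spinlock()/mutex_spinunlock()) to a bare spinlock_t driven by spin_lock()/spin_unlock(). To the best of my recollection the old wrappers were thin shims over the irqsave variants of the generic spinlock API, roughly as sketched below (a paraphrase for reference, not code from this tree), so the visible difference is that the debug-only global-list lock no longer saves and restores interrupt flags:

    #include <linux/spinlock.h>

    /* Rough paraphrase of the removed wrappers, for reference only. */
    typedef spinlock_t lock_t;

    #define SPLDECL(s)              unsigned long s

    static inline unsigned long mutex_spinlock(lock_t *lock)
    {
            unsigned long s;

            spin_lock_irqsave(lock, s);     /* disable irqs, remember flags */
            return s;
    }

    #define mutex_spinunlock(lock, s)       spin_unlock_irqrestore(lock, s)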