An assorted set of casts (u64 <-> pointer via unsigned long, and min_t for mixed-width comparisons) to get rid of the pointer/integer size-mismatch warnings on 32-bit archs.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
        ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
        if (ret < 0) {
                printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
-                      ret, BTRFS_LEAF_DATA_SIZE(root),
+                      ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
                       leaf_space_used(leaf, 0, nritems), nritems);
        }
        return ret;
 
        if (ret)
                return NULL;
 
-       block_group = (struct btrfs_block_group_cache *)ptr;
+       block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
 
 
        if (block_group->key.objectid <= bytenr && bytenr <=
                if (ret)
                        break;
 
-               cache = (struct btrfs_block_group_cache *)ptr;
+               cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);
 
                if (ret)
                        break;
 
-               cache = (struct btrfs_block_group_cache *)ptr;
+               cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                err = write_one_cache_group(trans, root,
                                            path, cache);
                /*
                                found_key.objectid + found_key.offset - 1,
                                bit | EXTENT_LOCKED, GFP_NOFS);
                set_state_private(block_group_cache, found_key.objectid,
-                                 (u64)cache);
+                                 (unsigned long)cache);
 
                if (key.objectid >=
                    btrfs_super_total_bytes(&info->super_copy))
 
 
                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
                                               src_off_in_page));
-               cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
-                                              dst_off_in_page));
+               cur = min_t(unsigned long, cur,
+                       (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
 
                copy_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                if (dst_i == 0)
                        dst_off_in_page += start_offset;
 
-               cur = min(len, src_off_in_page + 1);
+               cur = min_t(unsigned long, len, src_off_in_page + 1);
                cur = min(cur, dst_off_in_page + 1);
                move_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
 
        while (size > 0) {
                page = pages[i];
                kaddr = kmap_atomic(page, KM_USER0);
-               cur_size = min(PAGE_CACHE_SIZE - page_offset, size);
+               cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
                write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
                kunmap_atomic(kaddr, KM_USER0);
                page_offset = 0;
 
                        extent_start;
                ptr = btrfs_file_extent_inline_start(item) + extent_offset;
                map = kmap(page);
-               copy_size = min(PAGE_CACHE_SIZE - page_offset,
+               copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset,
                                size - extent_offset);
 
                em->block_start = EXTENT_MAP_INLINE;