struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
-                               set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
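The buffer_head hunk is safe because the cache, not each call site, now carries the reclaimable hint. Roughly what buffer_init() in fs/buffer.c already does (quoted from memory of the era's source, not part of this diff):

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD),
			NULL);

With SLAB_RECLAIM_ACCOUNT, the slab allocator accounts these objects under NR_SLAB_RECLAIMABLE and asks for reclaimable pageblocks whenever it grows the cache, so OR-ing __GFP_RECLAIMABLE into every allocation added nothing.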
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
@@ ... @@ static inline enum zone_type gfp_zone(gfp_t flags)
        return base + ZONE_NORMAL;
 }
 
-static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
-{
-       BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
-       return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
-}
-
 /*
  * There is only one page-allocator function, and two main namespaces to
  * it. The alloc_page*() variants return 'struct page *' and as such
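set_migrateflags() existed only to force a mobility hint into a gfp mask at individual call sites: GFP_MOVABLE_MASK is (__GFP_RECLAIMABLE|__GFP_MOVABLE), and the BUG_ON() caught callers that had somehow set both of the mutually exclusive bits. This patch removes its last users, and the slab allocators already derive the same hint from the cache flags; roughly, from memory of the era's kmem_getpages() in mm/slab.c (SLUB does the equivalent):

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

so the helper can go.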
 
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
@@ ... @@ radix_tree_node_alloc(struct radix_tree_root *root)
                }
        }
        if (ret == NULL)
-               ret = kmem_cache_alloc(radix_tree_node_cachep,
-                               set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+               ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 
        BUG_ON(radix_tree_is_indirect_ptr(ret));
        return ret;
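For context, radix_tree_node_alloc() first tries a per-CPU preload pool and only then falls back to the slab cache seen above. A minimal sketch of that two-level pattern, with hypothetical names and assuming the era's __GFP_WAIT and __get_cpu_var() APIs:

	#include <linux/percpu.h>
	#include <linux/slab.h>

	struct node_pool {			/* mirrors struct radix_tree_preload */
		int nr;
		void *nodes[16];
	};
	static DEFINE_PER_CPU(struct node_pool, node_pool);
	static struct kmem_cache *node_cachep;	/* hypothetical cache */

	static void *node_alloc(gfp_t gfp_mask)
	{
		void *ret = NULL;

		/* Atomic callers drain the per-CPU pool; they must have
		 * preemption disabled already (e.g. via a prior preload). */
		if (!(gfp_mask & __GFP_WAIT)) {
			struct node_pool *pool = &__get_cpu_var(node_pool);
			if (pool->nr)
				ret = pool->nodes[--pool->nr];
		}
		/* Sleepable callers, or an empty pool, hit the slab cache. */
		if (ret == NULL)
			ret = kmem_cache_alloc(node_cachep, gfp_mask);
		return ret;
	}

Both paths end in the same cache, which is why dropping __GFP_RECLAIMABLE here is harmless once the cache itself is reclaim-accounted.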
@@ ... @@ int radix_tree_preload(gfp_t gfp_mask)
        rtp = &__get_cpu_var(radix_tree_preloads);
        while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
                preempt_enable();
-               node = kmem_cache_alloc(radix_tree_node_cachep,
-                               set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+               node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
                if (node == NULL)
                        goto out;
                preempt_disable();
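This loop is radix_tree_preload() refilling that per-CPU pool: it only sleeps in kmem_cache_alloc() with preemption enabled, re-disables preemption before touching the pool, and returns with preemption still off so the pool cannot migrate away before the insert. A typical caller, with a hypothetical tree and lock mirroring how the page cache uses it:

	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	/* GFP_ATOMIC on the root makes inserts consume the preloaded
	 * pool rather than sleep. */
	static RADIX_TREE(my_tree, GFP_ATOMIC);	/* hypothetical */
	static DEFINE_SPINLOCK(my_lock);	/* hypothetical */

	static int my_insert(unsigned long index, void *item)
	{
		int err;

		/* Preload while we are still allowed to sleep. */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			return err;

		spin_lock(&my_lock);
		err = radix_tree_insert(&my_tree, index, item);
		spin_unlock(&my_lock);

		/* Re-enables the preemption disabled by the preload. */
		radix_tree_preload_end();
		return err;
	}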
@@ ... @@ void __init radix_tree_init(void)
 {
        radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
                        sizeof(struct radix_tree_node), 0,
-                       SLAB_PANIC, radix_tree_node_ctor);
+                       SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+                       radix_tree_node_ctor);
        radix_tree_init_maxindex();
        hotcpu_notifier(radix_tree_callback, 0);
 }
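This last hunk is the point of the patch: the node cache itself is tagged SLAB_RECLAIM_ACCOUNT, so its pages are grouped with other reclaimable allocations and accounted under NR_SLAB_RECLAIMABLE, with no per-call flag games. Any cache of short-lived or shrinkable objects can follow the same pattern; a minimal sketch with a hypothetical cache, using the era's five-argument kmem_cache_create():

	#include <linux/slab.h>

	struct example_obj {			/* hypothetical object */
		unsigned long key;
		void *data;
	};
	static struct kmem_cache *example_cachep;

	static int __init example_init(void)
	{
		/* One flag at creation time replaces set_migrateflags()
		 * at every allocation site. */
		example_cachep = kmem_cache_create("example_obj",
				sizeof(struct example_obj), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_PANIC,
				NULL);
		return 0;
	}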