void *cpuaddr;
 
 #ifdef USE_RBPS_POOL 
-               cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
+               cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
                if (cpuaddr == NULL)
                        return -ENOMEM;
 #else
                void *cpuaddr;
 
 #ifdef USE_RBPL_POOL
-               cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|SLAB_DMA, &dma_handle);
+               cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
                if (cpuaddr == NULL)
                        return -ENOMEM;
 #else
        struct he_tpd *tpd;
        dma_addr_t dma_handle; 
 
-       tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|SLAB_DMA, &dma_handle);
+       tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
        if (tpd == NULL)
                return NULL;
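
For reference, the pci_pool calls patched above follow the usual DMA-pool pattern: a per-device pool is created once, and each allocation passes its gfp flags straight through, now with GFP_DMA in place of the removed SLAB_DMA alias. A minimal sketch of that pattern; the pool name, object size, alignment, and helper names below are illustrative assumptions, not he.c's actual values.

#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Assumed per-device pool, stood up once at probe time. */
static struct pci_pool *demo_pool;

static int demo_pool_init(struct pci_dev *pdev)
{
	/* 64-byte objects, 16-byte aligned: illustrative sizes only. */
	demo_pool = pci_pool_create("demo_pool", pdev, 64, 16, 0);
	if (demo_pool == NULL)
		return -ENOMEM;
	return 0;
}

static void *demo_pool_get(dma_addr_t *dma_handle)
{
	/* gfp flags are passed through; GFP_DMA replaces the old SLAB_DMA. */
	return pci_pool_alloc(demo_pool, GFP_KERNEL | GFP_DMA, dma_handle);
}

static void demo_pool_put(void *cpuaddr, dma_addr_t dma_handle)
{
	pci_pool_free(demo_pool, cpuaddr, dma_handle);
}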
                        
 
                dst = page_address(bv->bv_page) + bv->bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
-                                                     SLAB_DMA | __GFP_NOWARN);
+                                                     GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
                                memcpy(copy + bv->bv_offset, dst, bv->bv_len);
                        if (copy)
 
                dst = page_address(bv->bv_page) + bv->bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
-                                                     SLAB_DMA | __GFP_NOWARN);
+                                                     GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
                                memcpy(copy + bv->bv_offset, dst, bv->bv_len);
                        if (copy)
 
 }
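
The dasd hunks above take an optional bounce copy of each bio segment from a slab cache backed by ZONE_DMA pages (dasd_page_cache). A minimal sketch of that pattern under assumed names and sizes; the six-argument kmem_cache_create() form with unused ctor/dtor is itself an assumption about the tree this patch targets.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Assumed DMA-capable cache, analogous to dasd_page_cache. */
static struct kmem_cache *bounce_cache;

static int bounce_cache_init(void)
{
	bounce_cache = kmem_cache_create("bounce_cache", PAGE_SIZE, PAGE_SIZE,
					 SLAB_CACHE_DMA, NULL, NULL);
	return bounce_cache ? 0 : -ENOMEM;
}

static char *bounce_copy_for_write(const char *dst, size_t len)
{
	/* GFP_DMA replaces SLAB_DMA; __GFP_NOWARN keeps failures quiet. */
	char *copy = kmem_cache_alloc(bounce_cache, GFP_DMA | __GFP_NOWARN);

	if (copy)
		memcpy(copy, dst, len);
	return copy;
}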
 
 
-/* sometimes alloc/free could use kmalloc with SLAB_DMA, for
+/* sometimes alloc/free could use kmalloc with GFP_DMA, for
  * better sharing and to leverage mm/slab.c intelligence.
  */
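
As the comment above suggests, small DMA-zone buffers can sometimes come from the shared kmalloc caches rather than a private pool. A minimal sketch of that alternative; the helper names are hypothetical.

#include <linux/slab.h>
#include <linux/gfp.h>

/* Hypothetical small-buffer helpers using the general slab caches. */
static void *small_dma_buf_alloc(size_t size)
{
	/* GFP_DMA keeps the object in ZONE_DMA, as SLAB_DMA used to. */
	return kmalloc(size, GFP_KERNEL | GFP_DMA);
}

static void small_dma_buf_free(void *buf)
{
	kfree(buf);
}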
 
 
 #include       <asm/page.h>            /* kmalloc_sizes.h needs PAGE_SIZE */
 #include       <asm/cache.h>           /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 
-/* flags for kmem_cache_alloc() */
-#define        SLAB_DMA                GFP_DMA
-
 /* flags to pass to kmem_cache_create().
  * The first 3 are only valid when the allocator has been built with
  * SLAB_DEBUG_SUPPORT.
 
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-       if (flags & SLAB_DMA)
+       if (flags & GFP_DMA)
                BUG_ON(!(cachep->gfpflags & GFP_DMA));
        else
                BUG_ON(cachep->gfpflags & GFP_DMA);
         * Be lazy and only check for valid flags here,  keeping it out of the
         * critical path in kmem_cache_alloc().
         */
-       BUG_ON(flags & ~(SLAB_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
+       BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
        if (flags & __GFP_NO_GROW)
                return 0;
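
kmem_flagcheck() above enforces that per-allocation gfp flags agree with how the cache was created: GFP_DMA is required for a DMA-backed cache and forbidden otherwise. A sketch of the rule it encodes, with assumed cache names; real callers would also handle allocation failure and cache setup.

#include <linux/slab.h>
#include <linux/gfp.h>

/* Assumed caches: one created with SLAB_CACHE_DMA, one without. */
static struct kmem_cache *dma_cache;
static struct kmem_cache *normal_cache;

static void flagcheck_examples(void)
{
	void *obj;

	/* OK: GFP_DMA request against the DMA-backed cache. */
	obj = kmem_cache_alloc(dma_cache, GFP_KERNEL | GFP_DMA);
	if (obj)
		kmem_cache_free(dma_cache, obj);

	/* OK: no GFP_DMA against the normal cache. */
	obj = kmem_cache_alloc(normal_cache, GFP_KERNEL);
	if (obj)
		kmem_cache_free(normal_cache, obj);

	/*
	 * Mixing them the other way round (GFP_DMA on normal_cache, or a
	 * plain GFP_KERNEL allocation from dma_cache) trips the BUG_ON()s
	 * in kmem_flagcheck() above.
	 */
}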