 EXPORT_SYMBOL(iommu_num_pages);
 #endif
 
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+                                dma_addr_t *dma_addr, gfp_t flag)
+{
+       unsigned long dma_mask;
+       struct page *page;
+       dma_addr_t addr;
+
+       dma_mask = dma_alloc_coherent_mask(dev, flag);
+
+       flag |= __GFP_ZERO;
+again:
+       page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+       if (!page)
+               return NULL;
+
+       addr = page_to_phys(page);
+       if (!is_buffer_dma_capable(dma_mask, addr, size)) {
+               __free_pages(page, get_order(size));
+
+               if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
+                       flag = (flag & ~GFP_DMA32) | GFP_DMA;
+                       goto again;
+               }
+
+               return NULL;
+       }
+
+       *dma_addr = addr;
+       return page_address(page);
+}
+
 /*
  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
  * documentation.
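
dma_generic_alloc_coherent() factors out the allocation policy that was previously private to the nommu path: allocate zeroed pages on the device's NUMA node, check the resulting physical address against the device's coherent DMA mask with is_buffer_dma_capable(), and, if the mask is narrower than 32 bits and the buffer landed too high, retry once from ZONE_DMA before giving up. Other alloc_coherent implementations can now build on it; a rough sketch of how an IOMMU backend might do so follows (illustration only, not part of this patch; foo_alloc_coherent is a made-up name):

static void *foo_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_addr, gfp_t flag)
{
	void *vaddr;

	/* Reuse the generic allocator for the page allocation and the
	 * coherent-mask / GFP_DMA retry logic. */
	vaddr = dma_generic_alloc_coherent(dev, size, dma_addr, flag);
	if (!vaddr)
		return NULL;

	/* On return, *dma_addr holds the buffer's physical address; an
	 * IOMMU backend would remap the buffer here and hand back its
	 * own bus address instead of the physical one. */
	return vaddr;
}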
 
        return nents;
 }
 
-static void *
-nommu_alloc_coherent(struct device *hwdev, size_t size,
-                    dma_addr_t *dma_addr, gfp_t gfp)
-{
-       unsigned long dma_mask;
-       int node;
-       struct page *page;
-       dma_addr_t addr;
-
-       dma_mask = dma_alloc_coherent_mask(hwdev, gfp);
-
-       gfp |= __GFP_ZERO;
-
-       node = dev_to_node(hwdev);
-again:
-       page = alloc_pages_node(node, gfp, get_order(size));
-       if (!page)
-               return NULL;
-
-       addr = page_to_phys(page);
-       if (!is_buffer_dma_capable(dma_mask, addr, size) && !(gfp & GFP_DMA)) {
-               free_pages((unsigned long)page_address(page), get_order(size));
-               gfp |= GFP_DMA;
-               goto again;
-       }
-
-       if (check_addr("alloc_coherent", hwdev, addr, size)) {
-               *dma_addr = addr;
-               flush_write_buffers();
-               return page_address(page);
-       }
-
-       free_pages((unsigned long)page_address(page), get_order(size));
-
-       return NULL;
-}
-
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
                                dma_addr_t dma_addr)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
 struct dma_mapping_ops nommu_dma_ops = {
-       .alloc_coherent = nommu_alloc_coherent,
+       .alloc_coherent = dma_generic_alloc_coherent,
        .free_coherent = nommu_free_coherent,
        .map_single = nommu_map_single,
        .map_sg = nommu_map_sg,
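
Without an IOMMU the bus address is simply the physical address, so nommu_dma_ops can point its .alloc_coherent hook straight at the generic helper with no wrapper. The extern declaration added to the dma-mapping header below is what lets this ops table (and other backends) reference the function. A simplified sketch of how an allocation reaches it through the ops table, assuming this tree's get_dma_ops() accessor (error handling omitted, not the exact in-tree dispatch code; foo_get_coherent_buffer is a made-up name):

static void *foo_get_coherent_buffer(struct device *dev, size_t size,
				     dma_addr_t *bus_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* With nommu_dma_ops installed this ends up in
	 * dma_generic_alloc_coherent(), and *bus_addr comes back as the
	 * buffer's physical address. */
	return ops->alloc_coherent(dev, size, bus_addr, GFP_KERNEL);
}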
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+                                       dma_addr_t *dma_addr, gfp_t flag);
+
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)