return 0;
 }
 
+/* allocate pages and map them for coherent DMA */
+static void *
+gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
+                   gfp_t flag)
+{
+       void *vaddr;
+
+       vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
+       if (!vaddr)
+               return NULL;
+
+       *dma_addr = gart_map_single(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL);
+       if (*dma_addr != bad_dma_address)
+               return vaddr;
+
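+       /* mapping failed: return the freshly allocated pages */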
+       free_pages((unsigned long)vaddr, get_order(size));
+
+       return NULL;
+}
+
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ ... @@
        .sync_sg_for_device             = NULL,
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
+       .alloc_coherent                 = gart_alloc_coherent,
 };
 
 void gart_iommu_shutdown(void)
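
A matching release path belongs with this. As a minimal sketch, assuming
the file's existing gart_unmap_single(dev, dma_addr, size, direction)
helper and a free_coherent hook in the ops structure alongside
alloc_coherent (neither shown in this hunk):

static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr)
{
        /* tear down the IOMMU mapping created by gart_alloc_coherent() */
        gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
        /* then return the pages to the page allocator */
        free_pages((unsigned long)vaddr, get_order(size));
}

wired up next to the other callbacks:

        .free_coherent                  = gart_free_coherent,

Drivers reach both paths only through dma_alloc_coherent() and
dma_free_coherent(), which dispatch through these ops when the GART
IOMMU is active.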