pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - arch/mips/mm/dma-default.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[linux-2.6-omap-h63xx.git] / arch / mips / mm / dma-default.c
index 98b5e5bac02e8b25ad7429a28b5548629cbee407..ae76795685cc2ee2789596dfbdfeba6c40b47808 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
 #include <linux/string.h>
 
 #include <asm/cache.h>
@@ -39,16 +40,38 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
               current_cpu_type() == CPU_R12000);
 }
 
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+       /* ignore region specifiers */
+       gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+       if (dev == NULL)
+               gfp |= __GFP_DMA;
+       else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+               gfp |= __GFP_DMA;
+       else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+               gfp |= __GFP_DMA32;
+       else
+#endif
+               ;
+
+       /* Don't invoke OOM killer */
+       gfp |= __GFP_NORETRY;
+
+       return gfp;
+}
+
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
 
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+       gfp = massage_gfp_flags(dev, gfp);
 
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));
 
        if (ret != NULL) {
@@ -66,11 +89,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
        void *ret;
 
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+       gfp = massage_gfp_flags(dev, gfp);
 
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));
 
        if (ret) {
@@ -165,12 +185,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;
 
-               addr = (unsigned long) page_address(sg->page);
+               addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
-                       __dma_sync(addr + sg->offset, sg->length, direction);
+                       __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
-                                                  (void *)(addr + sg->offset),
-                                                  sg->length);
+                                                  (void *)addr, sg->length);
        }
 
        return nents;
@@ -223,10 +242,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
-                       addr = (unsigned long) page_address(sg->page);
+                       addr = (unsigned long) sg_virt(sg);
                        if (addr)
-                               __dma_sync(addr + sg->offset, sg->length,
-                                          direction);
+                               __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
@@ -304,7 +322,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
-                       __dma_sync((unsigned long)page_address(sg->page),
+                       __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
@@ -322,7 +340,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
-                       __dma_sync((unsigned long)page_address(sg->page),
+                       __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
@@ -344,7 +362,7 @@ int dma_supported(struct device *dev, u64 mask)
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
-       if (mask < 0x00ffffff)
+       if (mask < DMA_BIT_MASK(24))
                return 0;
 
        return 1;