diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 6f492ac3fa136b8f9fdd55e91e0c21fc9633235b..6c0b8a2de14323d63c57d45dc17fa3d1bc41d950 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -2,17 +2,12 @@
 #define __ASM_SH_DMA_MAPPING_H
 
 #include <linux/mm.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
 extern struct bus_type pci_bus_type;
 
-/* arch/sh/mm/consistent.c */
-extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
-extern void consistent_free(void *vaddr, size_t size);
-extern void consistent_sync(void *vaddr, size_t size, int direction);
-
 #define dma_supported(dev, mask)       (1)
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
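The scatterlist include moves from <asm/scatterlist.h> to <linux/scatterlist.h> (needed for the sg_virt()/sg_phys() conversions below), and the consistent_*() prototypes disappear because the coherent-allocation entry points go out of line further down. Note that dma_supported() is hard-wired to 1 on sh, so dma_set_mask() reduces to bookkeeping. A minimal driver-side sketch; example_probe() and the choice of a 32-bit mask are purely illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_probe(struct device *dev)
    {
            /* dma_supported() always succeeds on sh, so this just
             * verifies dev->dma_mask exists and records the mask. */
            if (dma_set_mask(dev, DMA_32BIT_MASK))
                    return -EIO;
            return 0;
    }
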
@@ -25,44 +20,19 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
        return 0;
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t flag)
-{
-       if (sh_mv.mv_consistent_alloc) {
-               void *ret;
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                        dma_addr_t *dma_handle, gfp_t flag);
 
-               ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
-               if (ret != NULL)
-                       return ret;
-       }
+void dma_free_coherent(struct device *dev, size_t size,
+                      void *vaddr, dma_addr_t dma_handle);
 
-       return consistent_alloc(flag, size, dma_handle);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-                      void *vaddr, dma_addr_t dma_handle)
-{
-       if (sh_mv.mv_consistent_free) {
-               int ret;
-
-               ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
-               if (ret == 0)
-                       return;
-       }
-
-       consistent_free(vaddr, size);
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+                   enum dma_data_direction dir);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h) (1)
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-                                 enum dma_data_direction dir)
-{
-       consistent_sync(vaddr, size, (int)dir);
-}
-
 static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
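Here the inline dma_alloc_coherent()/dma_free_coherent() bodies, which dispatched through the sh_mv machine-vector hooks before falling back to consistent_alloc()/consistent_free(), become plain declarations; the implementation moves out of line (into arch/sh/mm/consistent.c, where the old consistent_*() helpers lived), and dma_cache_sync() follows. Callers are unaffected. A minimal usage sketch, all names other than the DMA API itself being illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Allocate and release a one-page coherent buffer, e.g. a
     * descriptor ring; "dev" is assumed to be a live struct device. */
    static void *example_ring_alloc(struct device *dev, dma_addr_t *dma)
    {
            return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
    }

    static void example_ring_free(struct device *dev, void *ring,
                                  dma_addr_t dma)
    {
            dma_free_coherent(dev, PAGE_SIZE, ring, dma);
    }
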
@@ -85,10 +55,9 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
 
        for (i = 0; i < nents; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-               dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
-                              sg[i].length, dir);
+               dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
 #endif
-               sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+               sg[i].dma_address = sg_phys(&sg[i]);
        }
 
        return nents;
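sg_virt() and sg_phys() come from the <linux/scatterlist.h> header included above and replace the open-coded page arithmetic. They go through sg_page(), which masks off the low bits of the page pointer used for scatterlist chaining, so chained lists keep working. The equivalences the conversion relies on, sketched as a compile-checkable helper:

    #include <linux/scatterlist.h>

    static inline void example_sg_accessors(struct scatterlist *sg)
    {
            /* sg_virt(sg) == page_address(sg_page(sg)) + sg->offset */
            void *va = sg_virt(sg);
            /* sg_phys(sg) == page_to_phys(sg_page(sg)) + sg->offset */
            dma_addr_t pa = sg_phys(sg);

            (void)va;
            (void)pa;
    }
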
@@ -138,10 +107,9 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
 
        for (i = 0; i < nelems; i++) {
 #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
-               dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
-                              sg[i].length, dir);
+               dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
 #endif
-               sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+               sg[i].dma_address = sg_phys(&sg[i]);
        }
 }
 
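dma_sync_sg() gets the same sg_virt()/sg_phys() treatment as dma_map_sg() above. For reference, the streaming round trip these helpers support, as a sketch with illustrative names:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    static int example_sg_receive(struct device *dev,
                                  struct scatterlist *sgl, int nents)
    {
            int n = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
            if (n == 0)
                    return -EIO;

            /* ... device DMAs into the buffers ... */

            /* Hand ownership back to the CPU before reading. */
            dma_sync_sg_for_cpu(dev, sgl, n, DMA_FROM_DEVICE);
            /* ... CPU consumes the data ... */
            dma_unmap_sg(dev, sgl, n, DMA_FROM_DEVICE);
            return 0;
    }
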
@@ -160,6 +128,25 @@ static inline void dma_sync_single_for_device(struct device *dev,
        dma_sync_single(dev, dma_handle, size, dir);
 }
 
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+                                                dma_addr_t dma_handle,
+                                                unsigned long offset,
+                                                size_t size,
+                                                enum dma_data_direction direction)
+{
+       dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+                                                   dma_addr_t dma_handle,
+                                                   unsigned long offset,
+                                                   size_t size,
+                                                   enum dma_data_direction direction)
+{
+       dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
+}
+
+
 static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
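The newly added range variants simply offset the DMA handle before delegating to the whole-buffer helpers, which lets a driver resynchronize only the bytes it cares about. A sketch; the 64-byte status block is illustrative:

    #include <linux/dma-mapping.h>

    static void example_peek_status(struct device *dev, dma_addr_t buf)
    {
            /* Pull only the first 64 bytes back for the CPU ... */
            dma_sync_single_range_for_cpu(dev, buf, 0, 64,
                                          DMA_FROM_DEVICE);
            /* ... inspect the status block ... */
            /* ... then return that range to the device. */
            dma_sync_single_range_for_device(dev, buf, 0, 64,
                                             DMA_FROM_DEVICE);
    }
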
@@ -184,8 +171,22 @@ static inline int dma_get_cache_alignment(void)
        return L1_CACHE_BYTES;
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return dma_addr == 0;
 }
+
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                           dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+                                 dma_addr_t device_addr, size_t size);
+
 #endif /* __ASM_SH_DMA_MAPPING_H */
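
Two API updates land in the final hunk: dma_mapping_error() gains the device argument used by the generic DMA API (callers now write dma_mapping_error(dev, handle)), and the dma_declare_coherent_memory() family is declared so a platform can back coherent allocations with device-local memory. A hypothetical probe-path sketch; the window address and size are invented for illustration, and in this kernel generation the declare call returns a nonzero flag mask on success and 0 on failure:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_declare_sram(struct device *dev)
    {
            /* Hypothetical 1 MiB on-chip SRAM window at 0xf8000000. */
            if (!dma_declare_coherent_memory(dev, 0xf8000000, 0xf8000000,
                                             1 << 20, DMA_MEMORY_MAP))
                    return -ENXIO;

            /* dma_alloc_coherent() for this device is now satisfied
             * from the declared window until it is released. */
            dma_release_declared_memory(dev);
            return 0;
    }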