diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index ae5708e3a3127fc9a180ae1d881d5bea5ede48bc..3a6eaa876ee1169026837a52fcb119a95def9bd3 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -2,130 +2,47 @@
  * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
  * Provide default implementations of the DMA mapping callbacks for
- * directly mapped busses and busses using the iommu infrastructure
+ * directly mapped busses.
  */
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <asm/bug.h>
-#include <asm/iommu.h>
 #include <asm/abs_addr.h>
 
-/*
- * Generic iommu implementation
- */
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
-                                     dma_addr_t *dma_handle, gfp_t flag)
-{
-       return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
-                                   dma_handle, device_to_mask(dev), flag,
-                                   dev->archdata.numa_node);
-}
-
-static void dma_iommu_free_coherent(struct device *dev, size_t size,
-                                   void *vaddr, dma_addr_t dma_handle)
-{
-       iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer.  The user buffer must be
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
-                                      size_t size,
-                                      enum dma_data_direction direction,
-                                      struct dma_attrs *attrs)
-{
-       return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
-                               device_to_mask(dev), direction, attrs);
-}
-
-
-static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
-                                  size_t size,
-                                  enum dma_data_direction direction,
-                                  struct dma_attrs *attrs)
-{
-       iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
-                          attrs);
-}
-
-
-static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
-                           int nelems, enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
-{
-       return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
-                           device_to_mask(dev), direction, attrs);
-}
-
-static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction,
-               struct dma_attrs *attrs)
-{
-       iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
-                      attrs);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
-{
-       struct iommu_table *tbl = dev->archdata.dma_data;
-
-       if (!tbl || tbl->it_offset > mask) {
-               printk(KERN_INFO
-                      "Warning: IOMMU offset too big for device mask\n");
-               if (tbl)
-                       printk(KERN_INFO
-                              "mask: 0x%08lx, table offset: 0x%08lx\n",
-                               mask, tbl->it_offset);
-               else
-                       printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
-                               mask);
-               return 0;
-       } else
-               return 1;
-}
-
-struct dma_mapping_ops dma_iommu_ops = {
-       .alloc_coherent = dma_iommu_alloc_coherent,
-       .free_coherent  = dma_iommu_free_coherent,
-       .map_single     = dma_iommu_map_single,
-       .unmap_single   = dma_iommu_unmap_single,
-       .map_sg         = dma_iommu_map_sg,
-       .unmap_sg       = dma_iommu_unmap_sg,
-       .dma_supported  = dma_iommu_dma_supported,
-};
-EXPORT_SYMBOL(dma_iommu_ops);
-
 /*
  * Generic direct DMA implementation
  *
  * This implementation supports a per-device offset that can be applied if
  * the address at which memory is visible to devices is not 0. Platform code
  * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is zero.
+ * default the offset is PCI_DRAM_OFFSET.
  */
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-       return (unsigned long)dev->archdata.dma_data;
+       if (dev)
+               return (unsigned long)dev->archdata.dma_data;
+
+       return PCI_DRAM_OFFSET;
 }
 
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t flag)
+void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+                               dma_addr_t *dma_handle, gfp_t flag)
 {
-       struct page *page;
        void *ret;
-       int node = dev->archdata.numa_node;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       ret = __dma_alloc_coherent(size, dma_handle, flag);
+       if (ret == NULL)
+               return NULL;
+       *dma_handle += get_dma_direct_offset(dev);
+       return ret;
+#else
+       struct page *page;
+       int node = dev_to_node(dev);
+
+       /* ignore region specifiers */
+       flag  &= ~(__GFP_HIGHMEM);
 
        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
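
The hunk above splits the old dma.c in two: the iommu-backed ops move out of
this file (into a separate dma-iommu.c elsewhere in this series), while the
direct ops gain 32-bit support. get_dma_direct_offset() now tolerates a NULL
device and falls back to PCI_DRAM_OFFSET; platform code can still store a
per-device offset in archdata.dma_data. A minimal sketch of how a platform
might set that up follows; the fixup function and offset value are invented
for illustration, only the archdata.dma_data field is real:

#include <linux/pci.h>

/* Example only: pretend devices see system RAM starting at 2GB. */
#define EXAMPLE_DMA_OFFSET	0x80000000ul

static void example_pci_dma_dev_setup(struct pci_dev *pdev)
{
	/* get_dma_direct_offset() casts this back to unsigned long */
	pdev->dev.archdata.dma_data = (void *)EXAMPLE_DMA_OFFSET;
}
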
@@ -135,27 +52,17 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
        *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
 
        return ret;
+#endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
-                                    void *vaddr, dma_addr_t dma_handle)
+void dma_direct_free_coherent(struct device *dev, size_t size,
+                             void *vaddr, dma_addr_t dma_handle)
 {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       __dma_free_coherent(size, vaddr);
+#else
        free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
-                                       size_t size,
-                                       enum dma_data_direction direction,
-                                       struct dma_attrs *attrs)
-{
-       return virt_to_abs(ptr) + get_dma_direct_offset(dev);
-}
-
-static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                                   size_t size,
-                                   enum dma_data_direction direction,
-                                   struct dma_attrs *attrs)
-{
+#endif
 }
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
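
The free path above mirrors the allocator: with CONFIG_NOT_COHERENT_CACHE the
buffer came from __dma_alloc_coherent(), so it must go back through
__dma_free_coherent() rather than free_pages(). For context, a sketch of the
driver-facing pairing these ops sit behind; the function is hypothetical, the
dma_alloc_coherent()/dma_free_coherent() calls are the standard API:

#include <linux/dma-mapping.h>

static int example_use_coherent(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}
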
@@ -168,6 +75,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
                sg->dma_length = sg->length;
+               __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }
 
        return nents;
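
The __dma_sync_page() call added to the scatterlist path is what keeps
streaming mappings correct on cache-incoherent CPUs: CPU caches are flushed or
invalidated for each segment as it is mapped. Driver usage is unchanged; a
sketch, where the function and its error handling are illustrative only:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_for_device(struct device *dev,
				  struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (mapped == 0)
		return -EIO;

	/* ... program the device via sg_dma_address()/sg_dma_len() ... */

	/* unmap with the original nents, not the returned count */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
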
@@ -181,20 +89,44 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 static int dma_direct_dma_supported(struct device *dev, u64 mask)
 {
+#ifdef CONFIG_PPC64
        /* Could be improved to check for memory though it better be
         * done via some global so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= DMA_32BIT_MASK;
+#else
+       return 1;
+#endif
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+                                            struct page *page,
+                                            unsigned long offset,
+                                            size_t size,
+                                            enum dma_data_direction dir,
+                                            struct dma_attrs *attrs)
+{
+       BUG_ON(dir == DMA_NONE);
+       __dma_sync_page(page, offset, size, dir);
+       return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+                                        dma_addr_t dma_address,
+                                        size_t size,
+                                        enum dma_data_direction direction,
+                                        struct dma_attrs *attrs)
+{
 }
 
 struct dma_mapping_ops dma_direct_ops = {
        .alloc_coherent = dma_direct_alloc_coherent,
        .free_coherent  = dma_direct_free_coherent,
-       .map_single     = dma_direct_map_single,
-       .unmap_single   = dma_direct_unmap_single,
        .map_sg         = dma_direct_map_sg,
        .unmap_sg       = dma_direct_unmap_sg,
        .dma_supported  = dma_direct_dma_supported,
+       .map_page       = dma_direct_map_page,
+       .unmap_page     = dma_direct_unmap_page,
 };
 EXPORT_SYMBOL(dma_direct_ops);
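
With .map_page/.unmap_page in the ops table, the old .map_single/.unmap_single
hooks become redundant: the generic dma_map_single() wrapper (updated in the
companion dma-mapping.h change, not shown in this diff) can be built on
.map_page. A rough sketch of that reduction, assuming this tree's
get_dma_ops() helper; the function name here is illustrative:

static inline dma_addr_t example_map_single(struct device *dev, void *ptr,
					    size_t size,
					    enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* ptr must be a lowmem (directly mapped) kernel address */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}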