* Dummy noncoherent implementation. We don't provide a dma_cache_sync
* function so drivers using this API are highlighted with build warnings.
*/
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
{
return NULL;
}
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle)
{
}
* returns the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
/**
* dma_free_coherent - free memory allocated by dma_alloc_coherent
* References to memory and mappings associated with cpu_addr/handle
* are illegal during and after this call.
*/
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
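
A minimal usage sketch for this pair (not part of the patch; "my_probe",
"my_remove" and the buffer variables are illustrative names):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	#define MY_BUF_SIZE	4096		/* illustrative size */

	static void *my_buf;			/* CPU-viewed address */
	static dma_addr_t my_buf_dma;		/* device-viewed address */

	static int my_probe(struct device *dev)
	{
		my_buf = dma_alloc_coherent(dev, MY_BUF_SIZE, &my_buf_dma,
					    GFP_KERNEL);
		if (!my_buf)
			return -ENOMEM;
		/* hand my_buf_dma to the device; the CPU uses my_buf */
		return 0;
	}

	static void my_remove(struct device *dev)
	{
		/* no references to my_buf/my_buf_dma may outlive this call */
		dma_free_coherent(dev, MY_BUF_SIZE, my_buf, my_buf_dma);
	}
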
/**
* dma_mmap_coherent - map a coherent DMA allocation into user space
* The coherent DMA buffer must not be freed by the driver until the
* user space mapping has been released.
*/
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
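
As a sketch, a character-device mmap handler can forward straight to
dma_mmap_coherent(); "my_dev" is hypothetical and the buffer names reuse
the probe sketch above:

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* the buffer must stay allocated until the mapping is gone */
		return dma_mmap_coherent(my_dev, vma, my_buf, my_buf_dma,
					 MY_BUF_SIZE);
	}
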
/**
* returns the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
-extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+ gfp_t);
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
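
A hedged sketch of the write-combined variant, e.g. for a dumb
framebuffer; the "fb_*" names are illustrative, and dma_free_writecombine()
is just the macro above. dma_mmap_writecombine() then plays the same role
as dma_mmap_coherent() in the mmap sketch earlier:

	static void *fb_virt;
	static dma_addr_t fb_dma;

	static int my_fb_alloc(struct device *dev, size_t fb_size)
	{
		/* write-combined: fast uncached CPU writes, no cache
		 * maintenance needed against the device */
		fb_virt = dma_alloc_writecombine(dev, fb_size, &fb_dma,
						 GFP_KERNEL);
		return fb_virt ? 0 : -ENOMEM;
	}

	static void my_fb_free(struct device *dev, size_t fb_size)
	{
		dma_free_writecombine(dev, fb_size, fb_virt, fb_dma);
	}
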
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems, the dma-mapping functions are
+ * "magic" and utilize bounce buffers as needed to work around limited
+ * DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
+ * On some ADI engineering systems, the PCI inbound window is 32MB
+ * (12MB total RAM).
+ *
+ * The following are helper functions used by the dmabounce subsystem.
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requiring DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long,
+ unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when a device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ */
+extern void dmabounce_unregister_dev(struct device *);
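
An illustrative board-support fragment, assuming some hook that sees each
device as it is added or removed; the pool sizes are arbitrary examples,
not values taken from this patch:

	static int my_board_add_dev(struct device *dev)
	{
		/* 512-byte pool for small buffers, 4K pool for large ones */
		return dmabounce_register_dev(dev, 512, 4096);
	}

	static void my_board_del_dev(struct device *dev)
	{
		dmabounce_unregister_dev(dev);
	}
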
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested, to determine whether a given buffer needs to be
+ * bounced. It must return 0 if the buffer is OK for DMA access and
+ * 1 if the buffer needs to be bounced.
+ */
+extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
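
A minimal sketch of this platform hook, assuming a PCI inbound window
covering the first 64MB of RAM (the IXP425 case described above); the
window bounds here are illustrative, not from this patch:

	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
	{
		/* bounce iff any part of the buffer lies past the window */
		return (addr + size) > (PHYS_OFFSET + SZ_64M);
	}
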
+
+/*
+ * The DMA API, implemented by dmabounce.c. See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+ enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *, struct page *,
+ unsigned long, size_t, enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+ size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+ size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1)
/**
* can regain ownership by calling dma_unmap_single() or
* dma_sync_single_for_cpu().
*/
-#ifndef CONFIG_DMABOUNCE
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction dir)
{
+ BUG_ON(!valid_dma_direction(dir));
+
if (!arch_is_coherent())
dma_cache_maint(cpu_addr, size, dir);
return virt_to_dma(dev, cpu_addr);
}
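
A hedged streaming-DMA sketch around dma_map_single()/dma_unmap_single();
"my_hw_kick" stands in for the driver's own start-of-transfer routine:

	static void my_start_tx(struct device *dev, void *data, size_t len)
	{
		/* the CPU must not touch data until it is unmapped */
		dma_addr_t dma = dma_map_single(dev, data, len,
						DMA_TO_DEVICE);
		my_hw_kick(dma, len);		/* hypothetical */
	}

	static void my_tx_done(struct device *dev, dma_addr_t dma, size_t len)
	{
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	}
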
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
/**
* dma_map_page - map a portion of a page for streaming DMA
* can regain ownership by calling dma_unmap_page() or
* dma_sync_single_for_cpu().
*/
-#ifndef CONFIG_DMABOUNCE
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
{
+ BUG_ON(!valid_dma_direction(dir));
+
if (!arch_is_coherent())
dma_cache_maint(page_address(page) + offset, size, dir);
return page_to_dma(dev, page) + offset;
}
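
The page-based variant follows the same pattern; a sketch mapping a
fragment of a page (names illustrative):

	static dma_addr_t my_map_frag(struct device *dev, struct page *page,
				      unsigned long offset, size_t frag_len)
	{
		return dma_map_page(dev, page, offset, frag_len,
				    DMA_TO_DEVICE);
	}
	/* balanced later by dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE) */
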
-#else
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir);
-#endif
/**
* dma_unmap_single - unmap a single buffer previously mapped
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
/* nothing to do */
}
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
dma_unmap_single(dev, handle, size, dir);
}
* must first perform a dma_sync_for_device, and then the
* device again owns the buffer.
*/
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- if (!arch_is_coherent())
- dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+ BUG_ON(!valid_dma_direction(dir));
+
+ dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
+ BUG_ON(!valid_dma_direction(dir));
+
+ if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+ return;
+
if (!arch_is_coherent())
dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
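
Sketch of the intended ownership dance, assuming a device that DMAs
status into a long-lived mapping; "my_status_ready" and "my_process"
are hypothetical:

	static void my_poll(struct device *dev, dma_addr_t dma,
			    void *cpu_addr, size_t len)
	{
		/* take the buffer back so CPU reads see the device's data */
		dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
		if (my_status_ready(cpu_addr))		/* hypothetical */
			my_process(cpu_addr, len);	/* hypothetical */
		/* return ownership before the device writes again */
		dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
	}
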
/*
* The scatter list versions of the above methods.
*/
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
-
-
-#ifdef CONFIG_DMABOUNCE
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+extern int dma_map_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
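
A hedged sketch of driving the scatter-gather calls; assumes "sg" and
"nents" already describe the request, and a hardware descriptor ring
to program:

	static int my_map_request(struct device *dev, struct scatterlist *sg,
				  int nents)
	{
		int i, mapped;

		mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
		for (i = 0; i < mapped; i++) {
			/* program sg_dma_address(&sg[i]) and
			 * sg_dma_len(&sg[i]) into the descriptor ring */
		}
		/* later: dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE) */
		return mapped;
	}
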
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#endif /* CONFIG_DMABOUNCE */
#endif /* __KERNEL__ */
#endif