Merge branch 'linus' into core/iommu
author Ingo Molnar <mingo@elte.hu>
Thu, 5 Mar 2009 11:47:28 +0000 (12:47 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 5 Mar 2009 11:47:28 +0000 (12:47 +0100)
arch/ia64/hp/common/sba_iommu.c
arch/x86/include/asm/dma-mapping.h
arch/x86/kernel/pci-gart_64.c
drivers/pci/intel-iommu.c
include/linux/intel-iommu.h
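
For orientation: each file below is converted from architecture-private DMA-mapping hooks to the generic struct dma_map_ops table. Reconstructed from the hooks assigned in these diffs (the real definition lives in the generic dma-mapping headers; fields not exercised here are omitted), the ops table looks roughly like this sketch:

struct dma_map_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	/* sync_single/sync_sg hooks follow the same pattern */
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
};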

diff --cc arch/ia64/hp/common/sba_iommu.c
index 129b62eb39e5813457c464bb22b70c69740e63e4,6d5e6c5630e3b8a6efa5eccae0d3d5c10ece4516..56ceb68eb99d244fffc70feab344e337ec385f7d
@@@ -36,7 -36,6 +36,7 @@@
  #include <linux/bitops.h>         /* hweight64() */
  #include <linux/crash_dump.h>
  #include <linux/iommu-helper.h>
 +#include <linux/dma-mapping.h>
  
  #include <asm/delay.h>                /* ia64_get_itc() */
  #include <asm/io.h>
@@@ -907,15 -906,13 +907,15 @@@ sba_mark_invalid(struct ioc *ioc, dma_a
   * @dir:  R/W or both.
   * @attrs: optional dma attributes
   *
-  * See Documentation/DMA-mapping.txt
+  * See Documentation/PCI/PCI-DMA-mapping.txt
   */
 -dma_addr_t
 -sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 -                   struct dma_attrs *attrs)
 +static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 +                             unsigned long poff, size_t size,
 +                             enum dma_data_direction dir,
 +                             struct dma_attrs *attrs)
  {
        struct ioc *ioc;
 +      void *addr = page_address(page) + poff;
        dma_addr_t iovp;
        dma_addr_t offset;
        u64 *pdir_start;
  #endif
        return SBA_IOVA(ioc, iovp, offset);
  }
 -EXPORT_SYMBOL(sba_map_single_attrs);
 +
 +static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
 +                                     size_t size, enum dma_data_direction dir,
 +                                     struct dma_attrs *attrs)
 +{
 +      return sba_map_page(dev, virt_to_page(addr),
 +                          (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
 +}
  
  #ifdef ENABLE_MARK_CLEAN
  static SBA_INLINE void
@@@ -1034,10 -1024,10 +1034,10 @@@ sba_mark_clean(struct ioc *ioc, dma_add
   * @dir:  R/W or both.
   * @attrs: optional dma attributes
   *
-  * See Documentation/DMA-mapping.txt
+  * See Documentation/PCI/PCI-DMA-mapping.txt
   */
 -void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
 -                          int dir, struct dma_attrs *attrs)
 +static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 +                         enum dma_data_direction dir, struct dma_attrs *attrs)
  {
        struct ioc *ioc;
  #if DELAYED_RESOURCE_CNT > 0
        spin_unlock_irqrestore(&ioc->res_lock, flags);
  #endif /* DELAYED_RESOURCE_CNT == 0 */
  }
 -EXPORT_SYMBOL(sba_unmap_single_attrs);
 +
 +void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
 +                          enum dma_data_direction dir, struct dma_attrs *attrs)
 +{
 +      sba_unmap_page(dev, iova, size, dir, attrs);
 +}
  
  /**
   * sba_alloc_coherent - allocate/map shared mem for DMA
   * @size:  number of bytes mapped in driver buffer.
   * @dma_handle:  IOVA of new buffer.
   *
-  * See Documentation/DMA-mapping.txt
+  * See Documentation/PCI/PCI-DMA-mapping.txt
   */
 -void *
 +static void *
  sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
  {
        struct ioc *ioc;
   * @vaddr:  virtual address IOVA of "consistent" buffer.
   * @dma_handler:  IO virtual address of "consistent" buffer.
   *
-  * See Documentation/DMA-mapping.txt
+  * See Documentation/PCI/PCI-DMA-mapping.txt
   */
 -void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 +static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
 +                             dma_addr_t dma_handle)
  {
        sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
        free_pages((unsigned long) vaddr, get_order(size));
@@@ -1436,11 -1420,10 +1436,11 @@@ sba_coalesce_chunks(struct ioc *ioc, st
   * @dir:  R/W or both.
   * @attrs: optional dma attributes
   *
-  * See Documentation/DMA-mapping.txt
+  * See Documentation/PCI/PCI-DMA-mapping.txt
   */
 -int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
 -                   int dir, struct dma_attrs *attrs)
 +static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 +                          int nents, enum dma_data_direction dir,
 +                          struct dma_attrs *attrs)
  {
        struct ioc *ioc;
        int coalesced, filled = 0;
  
        return filled;
  }
 -EXPORT_SYMBOL(sba_map_sg_attrs);
  
  /**
   * sba_unmap_sg_attrs - unmap Scatter/Gather list
   * @dir:  R/W or both.
   * @attrs: optional dma attributes
   *
-  * See Documentation/DMA-mapping.txt
+  * See Documentation/PCI/PCI-DMA-mapping.txt
   */
 -void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 -                      int nents, int dir, struct dma_attrs *attrs)
 +static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 +                             int nents, enum dma_data_direction dir,
 +                             struct dma_attrs *attrs)
  {
  #ifdef ASSERT_PDIR_SANITY
        struct ioc *ioc;
  #endif
  
  }
 -EXPORT_SYMBOL(sba_unmap_sg_attrs);
  
  /**************************************************************
  *
@@@ -2080,8 -2064,6 +2080,8 @@@ static struct acpi_driver acpi_sba_ioc_
        },
  };
  
 +extern struct dma_map_ops swiotlb_dma_ops;
 +
  static int __init
  sba_init(void)
  {
         * a successful kdump kernel boot is to use the swiotlb.
         */
        if (is_kdump_kernel()) {
 +              dma_ops = &swiotlb_dma_ops;
                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                        panic("Unable to initialize software I/O TLB:"
                                  " Try machvec=dig boot option");
                 * If we didn't find something sba_iommu can claim, we
                 * need to setup the swiotlb and switch to the dig machvec.
                 */
 +              dma_ops = &swiotlb_dma_ops;
                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                        panic("Unable to find SBA IOMMU or initialize "
                              "software I/O TLB: Try machvec=dig boot option");
@@@ -2158,13 -2138,15 +2158,13 @@@ nosbagart(char *str
        return 1;
  }
  
 -int
 -sba_dma_supported (struct device *dev, u64 mask)
 +static int sba_dma_supported (struct device *dev, u64 mask)
  {
        /* make sure it's at least 32bit capable */
        return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
  }
  
 -int
 -sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 +static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  {
        return 0;
  }
@@@ -2194,22 -2176,7 +2194,22 @@@ sba_page_override(char *str
  
  __setup("sbapagesize=",sba_page_override);
  
 -EXPORT_SYMBOL(sba_dma_mapping_error);
 -EXPORT_SYMBOL(sba_dma_supported);
 -EXPORT_SYMBOL(sba_alloc_coherent);
 -EXPORT_SYMBOL(sba_free_coherent);
 +struct dma_map_ops sba_dma_ops = {
 +      .alloc_coherent         = sba_alloc_coherent,
 +      .free_coherent          = sba_free_coherent,
 +      .map_page               = sba_map_page,
 +      .unmap_page             = sba_unmap_page,
 +      .map_sg                 = sba_map_sg_attrs,
 +      .unmap_sg               = sba_unmap_sg_attrs,
 +      .sync_single_for_cpu    = machvec_dma_sync_single,
 +      .sync_sg_for_cpu        = machvec_dma_sync_sg,
 +      .sync_single_for_device = machvec_dma_sync_single,
 +      .sync_sg_for_device     = machvec_dma_sync_sg,
 +      .dma_supported          = sba_dma_supported,
 +      .mapping_error          = sba_dma_mapping_error,
 +};
 +
 +void sba_dma_init(void)
 +{
 +      dma_ops = &sba_dma_ops;
 +}
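
The sba_iommu.c hunks above replace the exported sba_*_attrs entry points with a static struct dma_map_ops table that sba_dma_init() installs as dma_ops. A minimal sketch of how a driver's generic DMA call then reaches the SBA code, assuming the ia64 dma_map_single() wrapper dispatches through the installed ops table (all driver-side names here are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int my_sba_dma_example(struct device *dev)
{
	void *buf = kmalloc(512, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/*
	 * With dma_ops == &sba_dma_ops, this is routed to sba_map_page(),
	 * which gets virt_to_page(buf) plus the offset of buf within its page.
	 */
	handle = dma_map_single(dev, buf, 512, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -EIO;
	}

	/* ... hand 'handle' (an SBA IOVA) to the device ... */

	dma_unmap_single(dev, handle, 512, DMA_TO_DEVICE); /* -> sba_unmap_page() */
	kfree(buf);
	return 0;
}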
diff --cc arch/x86/include/asm/dma-mapping.h
index 5a347805a6c7ced5bb6c09e86d79d7663419d48c,132a134d12f24432a310f66485f6586387f618dd..9c78bd40ebec8ae1b6c7b3fed22192ea580fc8fa
@@@ -2,12 -2,11 +2,12 @@@
  #define _ASM_X86_DMA_MAPPING_H
  
  /*
-  * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-  * documentation.
+  * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
+  * Documentation/DMA-API.txt for documentation.
   */
  
  #include <linux/scatterlist.h>
 +#include <linux/dma-attrs.h>
  #include <asm/io.h>
  #include <asm/swiotlb.h>
  #include <asm-generic/dma-coherent.h>
@@@ -17,9 -16,47 +17,9 @@@ extern int iommu_merge
  extern struct device x86_dma_fallback_dev;
  extern int panic_on_overflow;
  
 -struct dma_mapping_ops {
 -      int             (*mapping_error)(struct device *dev,
 -                                       dma_addr_t dma_addr);
 -      void*           (*alloc_coherent)(struct device *dev, size_t size,
 -                              dma_addr_t *dma_handle, gfp_t gfp);
 -      void            (*free_coherent)(struct device *dev, size_t size,
 -                              void *vaddr, dma_addr_t dma_handle);
 -      dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
 -                              size_t size, int direction);
 -      void            (*unmap_single)(struct device *dev, dma_addr_t addr,
 -                              size_t size, int direction);
 -      void            (*sync_single_for_cpu)(struct device *hwdev,
 -                              dma_addr_t dma_handle, size_t size,
 -                              int direction);
 -      void            (*sync_single_for_device)(struct device *hwdev,
 -                              dma_addr_t dma_handle, size_t size,
 -                              int direction);
 -      void            (*sync_single_range_for_cpu)(struct device *hwdev,
 -                              dma_addr_t dma_handle, unsigned long offset,
 -                              size_t size, int direction);
 -      void            (*sync_single_range_for_device)(struct device *hwdev,
 -                              dma_addr_t dma_handle, unsigned long offset,
 -                              size_t size, int direction);
 -      void            (*sync_sg_for_cpu)(struct device *hwdev,
 -                              struct scatterlist *sg, int nelems,
 -                              int direction);
 -      void            (*sync_sg_for_device)(struct device *hwdev,
 -                              struct scatterlist *sg, int nelems,
 -                              int direction);
 -      int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
 -                              int nents, int direction);
 -      void            (*unmap_sg)(struct device *hwdev,
 -                              struct scatterlist *sg, int nents,
 -                              int direction);
 -      int             (*dma_supported)(struct device *hwdev, u64 mask);
 -      int             is_phys;
 -};
 -
 -extern struct dma_mapping_ops *dma_ops;
 -
 -static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 +extern struct dma_map_ops *dma_ops;
 +
 +static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  {
  #ifdef CONFIG_X86_32
        return dma_ops;
@@@ -34,7 -71,7 +34,7 @@@
  /* Make sure we keep the same behaviour */
  static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(dev);
 +      struct dma_map_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);
  
@@@ -53,139 -90,137 +53,139 @@@ extern void *dma_generic_alloc_coherent
  
  static inline dma_addr_t
  dma_map_single(struct device *hwdev, void *ptr, size_t size,
 -             int direction)
 +             enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 -      return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 +      BUG_ON(!valid_dma_direction(dir));
 +      return ops->map_page(hwdev, virt_to_page(ptr),
 +                           (unsigned long)ptr & ~PAGE_MASK, size,
 +                           dir, NULL);
  }
  
  static inline void
  dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 -               int direction)
 +               enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(dev);
 +      struct dma_map_ops *ops = get_dma_ops(dev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 -      if (ops->unmap_single)
 -              ops->unmap_single(dev, addr, size, direction);
 +      BUG_ON(!valid_dma_direction(dir));
 +      if (ops->unmap_page)
 +              ops->unmap_page(dev, addr, size, dir, NULL);
  }
  
  static inline int
  dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 -         int nents, int direction)
 +         int nents, enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 -      return ops->map_sg(hwdev, sg, nents, direction);
 +      BUG_ON(!valid_dma_direction(dir));
 +      return ops->map_sg(hwdev, sg, nents, dir, NULL);
  }
  
  static inline void
  dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 -           int direction)
 +           enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 +      BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_sg)
 -              ops->unmap_sg(hwdev, sg, nents, direction);
 +              ops->unmap_sg(hwdev, sg, nents, dir, NULL);
  }
  
  static inline void
  dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 -                      size_t size, int direction)
 +                      size_t size, enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 +      BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
 -              ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 +              ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
  }
  
  static inline void
  dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
 -                         size_t size, int direction)
 +                         size_t size, enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 +      BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
 -              ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 +              ops->sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
  }
  
  static inline void
  dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 -                            unsigned long offset, size_t size, int direction)
 +                            unsigned long offset, size_t size,
 +                            enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 +      BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
 -                                             size, direction);
 +                                             size, dir);
        flush_write_buffers();
  }
  
  static inline void
  dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
 -                               int direction)
 +                               enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 +      BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
 -                                                offset, size, direction);
 +                                                offset, size, dir);
        flush_write_buffers();
  }
  
  static inline void
  dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 -                  int nelems, int direction)
 +                  int nelems, enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 +      BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
 -              ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 +              ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
  }
  
  static inline void
  dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 -                     int nelems, int direction)
 +                     int nelems, enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 +      struct dma_map_ops *ops = get_dma_ops(hwdev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 +      BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
 -              ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 +              ops->sync_sg_for_device(hwdev, sg, nelems, dir);
  
        flush_write_buffers();
  }
  
  static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
 -                                    int direction)
 +                                    enum dma_data_direction dir)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(dev);
 +      struct dma_map_ops *ops = get_dma_ops(dev);
  
 -      BUG_ON(!valid_dma_direction(direction));
 -      return ops->map_single(dev, page_to_phys(page) + offset,
 -                             size, direction);
 +      BUG_ON(!valid_dma_direction(dir));
 +      return ops->map_page(dev, page, offset, size, dir, NULL);
  }
  
  static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 -                                size_t size, int direction)
 +                                size_t size, enum dma_data_direction dir)
  {
 -      dma_unmap_single(dev, addr, size, direction);
 +      dma_unmap_single(dev, addr, size, dir);
  }
  
  static inline void
@@@ -231,7 -266,7 +231,7 @@@ static inline void 
  dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(dev);
 +      struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;
  
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
  static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
  {
 -      struct dma_mapping_ops *ops = get_dma_ops(dev);
 +      struct dma_map_ops *ops = get_dma_ops(dev);
  
        WARN_ON(irqs_disabled());       /* for portability */
  
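
With the header now using the generic struct dma_map_ops, every wrapper above takes enum dma_data_direction and dispatches page-based mappings through ->map_page()/->unmap_page(). A short usage sketch of the retyped wrappers (hypothetical caller, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int my_map_one_page(struct device *dev, dma_addr_t *out)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	/*
	 * Now routed to get_dma_ops(dev)->map_page(dev, page, 0, PAGE_SIZE,
	 * dir, NULL) instead of the old ->map_single(dev, page_to_phys(page), ...).
	 */
	*out = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *out)) {
		__free_page(page);
		return -EIO;
	}
	return 0;
}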
diff --cc arch/x86/kernel/pci-gart_64.c
index 8cb3e45439cf25b0738e09445386238073d8f7e6,d5768b1af08041160e52ea6b70a2f6cd4bb6e476..b284b58c035ccdd8fc850604cced3557ae7de2c5
@@@ -5,7 -5,7 +5,7 @@@
   * This allows to use PCI devices that only support 32bit addresses on systems
   * with more than 4GB.
   *
-  * See Documentation/DMA-mapping.txt for the interface specification.
+  * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
   *
   * Copyright 2002 Andi Kleen, SuSE Labs.
   * Subject to the GNU General Public License v2 only.
@@@ -255,13 -255,10 +255,13 @@@ static dma_addr_t dma_map_area(struct d
  }
  
  /* Map a single area into the IOMMU */
 -static dma_addr_t
 -gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 +static dma_addr_t gart_map_page(struct device *dev, struct page *page,
 +                              unsigned long offset, size_t size,
 +                              enum dma_data_direction dir,
 +                              struct dma_attrs *attrs)
  {
        unsigned long bus;
 +      phys_addr_t paddr = page_to_phys(page) + offset;
  
        if (!dev)
                dev = &x86_dma_fallback_dev;
  /*
   * Free a DMA mapping.
   */
 -static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 -                            size_t size, int direction)
 +static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 +                          size_t size, enum dma_data_direction dir,
 +                          struct dma_attrs *attrs)
  {
        unsigned long iommu_page;
        int npages;
  /*
   * Wrapper for pci_unmap_single working with scatterlists.
   */
 -static void
 -gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 +static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 +                        enum dma_data_direction dir, struct dma_attrs *attrs)
  {
        struct scatterlist *s;
        int i;
        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
 -              gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
 +              gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
        }
  }
  
@@@ -333,7 -329,7 +333,7 @@@ static int dma_map_sg_nonforce(struct d
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_address) {
                                if (i > 0)
 -                                      gart_unmap_sg(dev, sg, i, dir);
 +                                      gart_unmap_sg(dev, sg, i, dir, NULL);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
@@@ -404,8 -400,8 +404,8 @@@ dma_map_cont(struct device *dev, struc
   * DMA map all entries in a scatterlist.
   * Merge chunks that have page aligned sizes into a continuous mapping.
   */
 -static int
 -gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 +static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 +                     enum dma_data_direction dir, struct dma_attrs *attrs)
  {
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
  
  error:
        flush_gart();
 -      gart_unmap_sg(dev, sg, out, dir);
 +      gart_unmap_sg(dev, sg, out, dir, NULL);
  
        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
@@@ -525,7 -521,7 +525,7 @@@ static voi
  gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr)
  {
 -      gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
 +      gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, get_order(size));
  }
  
@@@ -711,11 -707,11 +711,11 @@@ static __init int init_k8_gatt(struct a
        return -1;
  }
  
 -static struct dma_mapping_ops gart_dma_ops = {
 -      .map_single                     = gart_map_single,
 -      .unmap_single                   = gart_unmap_single,
 +static struct dma_map_ops gart_dma_ops = {
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
 +      .map_page                       = gart_map_page,
 +      .unmap_page                     = gart_unmap_page,
        .alloc_coherent                 = gart_alloc_coherent,
        .free_coherent                  = gart_free_coherent,
  };
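
In pci-gart_64.c the single-address hooks become page-based: gart_map_page() recomputes the physical address as page_to_phys(page) + offset, which for direct-mapped kernel memory equals what gart_map_single() used to receive as virt_to_phys(ptr). A hypothetical helper illustrating that equivalence (not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Maps a kernel virtual address via the new page-based hook. */
static dma_addr_t my_map_vaddr(struct device *dev, void *ptr, size_t size,
			       enum dma_data_direction dir)
{
	/*
	 * page_to_phys(virt_to_page(ptr)) + (ptr & ~PAGE_MASK) == virt_to_phys(ptr),
	 * so gart_map_page() sees the same paddr the old gart_map_single()
	 * was given directly.
	 */
	return dma_map_page(dev, virt_to_page(ptr),
			    (unsigned long)ptr & ~PAGE_MASK, size, dir);
}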
diff --cc drivers/pci/intel-iommu.c
index 628f8b72e530eec772fb5ad276d008271a9de7c1,f3f686581a9026685dc3e6c994b00e88ed88486c..e7d058aa7b0d35ef02792b7e08cbce26b8dd74e8
@@@ -61,6 -61,8 +61,8 @@@
  /* global iommu list, set NULL for ignored DMAR units */
  static struct intel_iommu **g_iommus;
  
+ static int rwbf_quirk;
  /*
   * 0: Present
   * 1-11: Reserved
@@@ -268,7 -270,12 +270,12 @@@ static long list_size
  
  static void domain_remove_dev_info(struct dmar_domain *domain);
  
- int dmar_disabled;
+ #ifdef CONFIG_DMAR_DEFAULT_ON
+ int dmar_disabled = 0;
+ #else
+ int dmar_disabled = 1;
+ #endif /*CONFIG_DMAR_DEFAULT_ON*/
  static int __initdata dmar_map_gfx = 1;
  static int dmar_forcedac;
  static int intel_iommu_strict;
@@@ -284,9 -291,12 +291,12 @@@ static int __init intel_iommu_setup(cha
        if (!str)
                return -EINVAL;
        while (*str) {
-               if (!strncmp(str, "off", 3)) {
+               if (!strncmp(str, "on", 2)) {
+                       dmar_disabled = 0;
+                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
+               } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
-                       printk(KERN_INFO"Intel-IOMMU: disabled\n");
+                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
@@@ -777,7 -787,7 +787,7 @@@ static void iommu_flush_write_buffer(st
        u32 val;
        unsigned long flag;
  
-       if (!cap_rwbf(iommu->cap))
+       if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;
        val = iommu->gcmd | DMA_GCMD_WBF;
  
@@@ -2274,13 -2284,11 +2284,13 @@@ error
        return 0;
  }
  
 -dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
 -                          size_t size, int dir)
 +static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 +                               unsigned long offset, size_t size,
 +                               enum dma_data_direction dir,
 +                               struct dma_attrs *attrs)
  {
 -      return __intel_map_single(hwdev, paddr, size, dir,
 -                                to_pci_dev(hwdev)->dma_mask);
 +      return __intel_map_single(dev, page_to_phys(page) + offset, size,
 +                                dir, to_pci_dev(dev)->dma_mask);
  }
  
  static void flush_unmaps(void)
@@@ -2344,9 -2352,8 +2354,9 @@@ static void add_unmap(struct dmar_domai
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
  }
  
 -void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 -                      int dir)
 +static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 +                           size_t size, enum dma_data_direction dir,
 +                           struct dma_attrs *attrs)
  {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
        }
  }
  
 -void *intel_alloc_coherent(struct device *hwdev, size_t size,
 -                         dma_addr_t *dma_handle, gfp_t flags)
 +static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 +                             int dir)
 +{
 +      intel_unmap_page(dev, dev_addr, size, dir, NULL);
 +}
 +
 +static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 +                                dma_addr_t *dma_handle, gfp_t flags)
  {
        void *vaddr;
        int order;
        return NULL;
  }
  
 -void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 -                       dma_addr_t dma_handle)
 +static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 +                              dma_addr_t dma_handle)
  {
        int order;
  
  
  #define SG_ENT_VIRT_ADDRESS(sg)       (sg_virt((sg)))
  
 -void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 -                  int nelems, int dir)
 +static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 +                         int nelems, enum dma_data_direction dir,
 +                         struct dma_attrs *attrs)
  {
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
@@@ -2493,8 -2493,8 +2503,8 @@@ static int intel_nontranslate_map_sg(st
        return nelems;
  }
  
 -int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 -               int dir)
 +static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 +                      enum dma_data_direction dir, struct dma_attrs *attrs)
  {
        void *addr;
        int i;
        return nelems;
  }
  
 -static struct dma_mapping_ops intel_dma_ops = {
 +static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
 +{
 +      return !dma_addr;
 +}
 +
 +struct dma_map_ops intel_dma_ops = {
        .alloc_coherent = intel_alloc_coherent,
        .free_coherent = intel_free_coherent,
 -      .map_single = intel_map_single,
 -      .unmap_single = intel_unmap_single,
        .map_sg = intel_map_sg,
        .unmap_sg = intel_unmap_sg,
 +      .map_page = intel_map_page,
 +      .unmap_page = intel_unmap_page,
 +      .mapping_error = intel_mapping_error,
  };
  
  static inline int iommu_domain_cache_init(void)
@@@ -3145,3 -3139,15 +3155,15 @@@ static struct iommu_ops intel_iommu_op
        .unmap          = intel_iommu_unmap_range,
        .iova_to_phys   = intel_iommu_iova_to_phys,
  };
+ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ {
+       /*
+        * Mobile 4 Series Chipset neglects to set RWBF capability,
+        * but needs it:
+        */
+       printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+       rwbf_quirk = 1;
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
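
Besides the map_page/unmap_page conversion, intel-iommu.c gains a .mapping_error hook: __intel_map_single() returns 0 on failure, and intel_mapping_error() reports exactly that. A sketch of how a caller would see such a failure through the generic API, assuming intel_dma_ops is the active dma_ops (caller names are hypothetical):

#include <linux/dma-mapping.h>

static int my_check_mapping(struct device *dev, struct page *page)
{
	dma_addr_t iova = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

	/* dma_mapping_error() calls intel_mapping_error(), i.e. !iova */
	if (dma_mapping_error(dev, iova))
		return -ENOMEM;

	dma_unmap_page(dev, iova, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}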
diff --cc include/linux/intel-iommu.h
index 43412aeddb53360c2dd5bbbd5301783442153c60,d2e3cbfba14f45ea30b34bc78e36735c5db2755d..4d6a0a2c00b07c34ed1f41503596844f6e70e4d4
@@@ -194,6 -194,7 +194,7 @@@ static inline void dmar_writeq(void __i
  /* FSTS_REG */
  #define DMA_FSTS_PPF ((u32)2)
  #define DMA_FSTS_PFO ((u32)1)
+ #define DMA_FSTS_IQE (1 << 4)
  #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
  
  /* FRCD_REG, 32 bits access */
@@@ -328,6 -329,13 +329,6 @@@ extern int qi_flush_iotlb(struct intel_
                          unsigned int size_order, u64 type,
                          int non_present_entry_flush);
  
- extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
  
 -extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 -extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
 -extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
 -extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
 -extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
 -extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
 -
  #endif