diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 5c95a5a65440549028bef93d962c3b085993382e..3dfecb20d5e7949953e7fefa2796af4b48bef88f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -205,7 +205,7 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 }
 
 /* devices under the same p2p bridge are owned in one domain */
-#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 < 0)
+#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
 
 /* domain represents a virtual machine; more than one device
  * across iommus may be owned in one domain, e.g. a kvm guest.
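
The flag fix in this hunk is more than cosmetic: (1 < 0) is a less-than
comparison that evaluates to the integer constant 0, so any test of
flags & DOMAIN_FLAG_P2P_MULTIPLE_DEVICES was unconditionally false;
(1 << 0) is the intended bit 0. A minimal standalone demonstration
(illustration only, not part of the patch):

    #include <stdio.h>

    #define BROKEN_FLAG (1 < 0)   /* comparison: constant 0 */
    #define FIXED_FLAG  (1 << 0)  /* shift: bit 0, i.e. 1 */

    int main(void)
    {
            int flags = FIXED_FLAG;

            printf("broken: %d\n", flags & BROKEN_FLAG); /* always 0 */
            printf("fixed:  %d\n", flags & FIXED_FLAG);  /* 1 */
            return 0;
    }
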
@@ -277,6 +277,8 @@ static int intel_iommu_strict;
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+static struct iommu_ops intel_iommu_ops;
+
 static int __init intel_iommu_setup(char *str)
 {
        if (!str)
@@ -436,7 +438,8 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
                        continue;
 
                for (i = 0; i < drhd->devices_cnt; i++)
-                       if (drhd->devices[i]->bus->number == bus &&
+                       if (drhd->devices[i] &&
+                           drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                                return drhd->iommu;
 
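
The added drhd->devices[i] check guards against NULL holes in the
per-DRHD device array: a device listed in a DMAR scope may have no
struct pci_dev behind it when the array is populated, leaving that slot
NULL, so the unguarded ->bus->number dereference could oops. The patched
loop, reshaped here for clarity (a sketch, same logic):

    for (i = 0; i < drhd->devices_cnt; i++) {
            struct pci_dev *pdev = drhd->devices[i];

            if (!pdev)      /* hole: scope entry with no pci_dev */
                    continue;
            if (pdev->bus->number == bus && pdev->devfn == devfn)
                    return drhd->iommu;
    }
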
@@ -2729,6 +2732,9 @@ int __init intel_iommu_init(void)
        init_timer(&unmap_timer);
        force_iommu = 1;
        dma_ops = &intel_dma_ops;
+
+       register_iommu(&intel_iommu_ops);
+
        return 0;
 }
 
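
register_iommu() plugs intel_iommu_ops into the generic IOMMU API; this
is why the intel_iommu_* entry points further down can drop their
EXPORT_SYMBOL_GPL and become static, with consumers such as KVM device
assignment reaching them through the generic wrappers instead. Roughly
(a sketch of the generic layer's shape, not the exact
drivers/base/iommu.c source):

    static struct iommu_ops *iommu_ops;

    void register_iommu(struct iommu_ops *ops)
    {
            if (iommu_ops)
                    BUG();          /* only one IOMMU driver at a time */
            iommu_ops = ops;
    }

    int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
    {
            return iommu_ops->attach_dev(domain, dev);
    }
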
@@ -2992,9 +2998,11 @@ static void intel_iommu_domain_destroy(struct iommu_domain *domain)
        vm_domain_exit(dmar_domain);
 }
 
-int intel_iommu_attach_device(struct dmar_domain *domain,
-                             struct pci_dev *pdev)
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct intel_iommu *iommu;
        int addr_width;
        u64 end;
@@ -3006,7 +3014,7 @@ int intel_iommu_attach_device(struct dmar_domain *domain,
 
                old_domain = find_domain(pdev);
                if (old_domain) {
-                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                       if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                vm_domain_remove_one_dev_info(old_domain, pdev);
                        else
                                domain_remove_dev_info(old_domain);
@@ -3021,43 +3029,52 @@ int intel_iommu_attach_device(struct dmar_domain *domain,
        addr_width = agaw_to_width(iommu->agaw);
        end = DOMAIN_MAX_ADDR(addr_width);
        end = end & VTD_PAGE_MASK;
-       if (end < domain->max_addr) {
+       if (end < dmar_domain->max_addr) {
                printk(KERN_ERR "%s: iommu agaw (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
-                      __func__, iommu->agaw, domain->max_addr);
+                      __func__, iommu->agaw, dmar_domain->max_addr);
                return -EFAULT;
        }
 
-       ret = domain_context_mapping(domain, pdev);
+       ret = domain_context_mapping(dmar_domain, pdev);
        if (ret)
                return ret;
 
-       ret = vm_domain_add_dev_info(domain, pdev);
+       ret = vm_domain_add_dev_info(dmar_domain, pdev);
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_attach_device);
 
-void intel_iommu_detach_device(struct dmar_domain *domain,
-                              struct pci_dev *pdev)
+static void intel_iommu_detach_device(struct iommu_domain *domain,
+                                     struct device *dev)
 {
-       vm_domain_remove_one_dev_info(domain, pdev);
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_detach_device);
 
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-                           u64 hpa, size_t size, int prot)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+                                unsigned long iova, phys_addr_t hpa,
+                                size_t size, int iommu_prot)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        u64 max_addr;
        int addr_width;
+       int prot = 0;
        int ret;
 
+       if (iommu_prot & IOMMU_READ)
+               prot |= DMA_PTE_READ;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= DMA_PTE_WRITE;
+
        max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
-       if (domain->max_addr < max_addr) {
+       if (dmar_domain->max_addr < max_addr) {
                int min_agaw;
                u64 end;
 
                /* check if minimum agaw is sufficient for mapped address */
-               min_agaw = vm_domain_min_agaw(domain);
+               min_agaw = vm_domain_min_agaw(dmar_domain);
                addr_width = agaw_to_width(min_agaw);
                end = DOMAIN_MAX_ADDR(addr_width);
                end = end & VTD_PAGE_MASK;
@@ -3067,44 +3084,48 @@ int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
                               __func__, min_agaw, max_addr);
                        return -EFAULT;
                }
-               domain->max_addr = max_addr;
+               dmar_domain->max_addr = max_addr;
        }
 
-       ret = domain_page_mapping(domain, iova, hpa, size, prot);
+       ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_map_address);
 
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-                              dma_addr_t iova, size_t size)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+                                   unsigned long iova, size_t size)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        dma_addr_t base;
 
        /* The address might not be aligned */
        base = iova & VTD_PAGE_MASK;
        size = VTD_PAGE_ALIGN(size);
-       dma_pte_clear_range(domain, base, base + size);
+       dma_pte_clear_range(dmar_domain, base, base + size);
 
-       if (domain->max_addr == base + size)
-               domain->max_addr = base;
+       if (dmar_domain->max_addr == base + size)
+               dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
 
-int intel_iommu_found(void)
-{
-       return g_num_of_iommus;
-}
-EXPORT_SYMBOL_GPL(intel_iommu_found);
-
-u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova)
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           unsigned long iova)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
        u64 phys = 0;
 
-       pte = addr_to_dma_pte(domain, iova);
+       pte = addr_to_dma_pte(dmar_domain, iova);
        if (pte)
                phys = dma_pte_addr(pte);
 
        return phys;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_phys);
+
+static struct iommu_ops intel_iommu_ops = {
+       .domain_init    = intel_iommu_domain_init,
+       .domain_destroy = intel_iommu_domain_destroy,
+       .attach_dev     = intel_iommu_attach_device,
+       .detach_dev     = intel_iommu_detach_device,
+       .map            = intel_iommu_map_range,
+       .unmap          = intel_iommu_unmap_range,
+       .iova_to_phys   = intel_iommu_iova_to_phys,
+};
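
With the ops table registered, callers drive VT-d entirely through the
generic interface. A minimal usage sketch, assuming the iommu-api of
this kernel generation (iommu_domain_alloc(), iommu_attach_device(),
iommu_map_range() with IOMMU_READ/IOMMU_WRITE prot flags):

    #include <linux/iommu.h>

    static int assign_and_map(struct device *dev, unsigned long iova,
                              phys_addr_t paddr, size_t size)
    {
            struct iommu_domain *domain;
            int ret;

            domain = iommu_domain_alloc();          /* ->domain_init */
            if (!domain)
                    return -ENOMEM;

            ret = iommu_attach_device(domain, dev); /* ->attach_dev */
            if (ret)
                    goto out_free;

            ret = iommu_map_range(domain, iova, paddr, size,
                                  IOMMU_READ | IOMMU_WRITE); /* ->map */
            if (ret)
                    iommu_detach_device(domain, dev); /* ->detach_dev */

    out_free:
            if (ret)
                    iommu_domain_free(domain);      /* ->domain_destroy */
            return ret;
    }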